source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
rose_v1_scalar_output.c | /*
* Scalar-to-scalar output dependencies
* */
#include <omp.h>
int a[100];
// A private case
void foo2()
{
int i;
int tmp;
#pragma omp parallel for private (tmp,i)
for (i = 0; i <= 99; i += 1) {
tmp = a[i] + i;
}
}
// A lastprivate case
// A lastprivate case: tmp must leave the loop holding the value computed
// by the sequentially-last iteration, so it is lastprivate, not private.
void foo()
{
  int idx;
  int tmp;
#pragma omp parallel for private (idx) lastprivate (tmp)
  for (idx = 0; idx < 100; idx++) {
    tmp = a[idx] + idx;
  }
  idx = tmp;
}
|
time_dgetrf_tile.c | /**
*
* @generated d Tue Jan 7 11:45:24 2014
*
**/
#define _TYPE double
#define _PREC double
#define _LAMCH LAPACKE_dlamch_work
#define _NAME "PLASMA_dgetrf_Tile"
/* See Lawn 41 page 120 */
#define _FMULS FMULS_GETRF(M, N)
#define _FADDS FADDS_GETRF(M, N)
#include "./timing.c"
/* Timing driver for PLASMA_dgetrf_Tile (tiled LU factorization with
 * partial pivoting).  Expands into the generic timing harness via the
 * macros pulled in from ./timing.c: iparam carries the problem sizes,
 * dparam receives the residual/norm outputs, t_ the timings. */
static int
RunTest(int *iparam, double *dparam, real_Double_t *t_)
{
/* Declares M, N, LDA, LDB, NRHS, check, ... from iparam. */
PASTE_CODE_IPARAM_LOCALS( iparam );
/* The residual check solves A x = b, which needs a square system. */
if ( M != N && check ) {
fprintf(stderr, "Check cannot be perfomed with M != N\n");
check = 0;
}
int runtime = RT_get_runtime();
int ws = RT_get_ws();
/* Under OmpSs, do the (untimed) setup through QUARK with a single
 * worker; the OmpSs mode is restored right before the timed region. */
if ( runtime == PLASMA_OMPSS ) {
PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_QUARK);
RT_set_ws(1);
}
/* Allocate Data */
PASTE_CODE_ALLOCATE_MATRIX_TILE( descA, 1, double, PlasmaRealDouble, LDA, M, N );
double *mat_ptr = descA->mat;
/* Register the tile storage with the runtime (OmpSs extension). */
#pragma omp register ( [LDA*N]mat_ptr)
// printf("register: mat: %p, size: %d\n", mat_ptr, LDA*N);
PASTE_CODE_ALLOCATE_MATRIX( piv, 1, int, min(M, N), 1 );
// RT_runtime_info();
/* Fill A with a reproducible random matrix (seed 3456). */
PLASMA_dplrnt_Tile(descA, 3456);
/* Save AT in lapack layout for check */
PASTE_TILE_TO_LAPACK( descA, A, check, double, LDA, N );
if ( runtime == PLASMA_OMPSS ) {
PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_OMPSS);
RT_set_ws(ws);
}
// RT_runtime_info();
/* Timed region: the factorization only. */
START_TIMING();
PLASMA_dgetrf_Tile( descA, piv );
STOP_TIMING();
/* Back to QUARK for the (untimed) solution check. */
if ( runtime == PLASMA_OMPSS ) {
PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_QUARK);
RT_set_ws(1);
}
/* Check the solution */
if ( check )
{
PASTE_CODE_ALLOCATE_MATRIX_TILE( descB, 1, double, PlasmaRealDouble, LDB, N, NRHS );
PLASMA_dplrnt_Tile( descB, 7732 );
PASTE_TILE_TO_LAPACK( descB, b, check, double, LDB, NRHS );
PLASMA_dgetrs_Tile( PlasmaNoTrans, descA, piv, descB );
PASTE_TILE_TO_LAPACK( descB, x, check, double, LDB, NRHS );
dparam[IPARAM_RES] = d_check_solution(M, N, NRHS, A, LDA, b, x, LDB,
&(dparam[IPARAM_ANORM]),
&(dparam[IPARAM_BNORM]),
&(dparam[IPARAM_XNORM]));
/* NOTE(review): descB/A/b/x are deliberately left unfreed here
 * (commented out below) -- presumably the harness exits right after;
 * confirm against timing.c before re-enabling the frees. */
// PASTE_CODE_FREE_MATRIX( descB );
// free(A); free(b); free(x);
}
// RT_set_ws(ps);
// PASTE_CODE_FREE_MATRIX( descA );
// free( piv );
if ( runtime == PLASMA_OMPSS ) {
PLASMA_Set(PLASMA_RUNTIME_MODE, PLASMA_OMPSS);
RT_set_ws(ws);
}
return 0;
}
|
operations.c | //-----------------------------------------------------------------------
//Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
//
//Author: Daniel M. Pelt
//Contact: D.M.Pelt@cwi.nl
//Website: http://dmpelt.github.io/msdnet/
//License: MIT
//
//This file is part of MSDNet, a Python implementation of the
//Mixed-Scale Dense Convolutional Neural Network.
//-----------------------------------------------------------------------
#include <math.h>
/* MSVC builds are compiled without OpenMP here, so set_threads is a
 * no-op stub; on every other compiler it forwards to the OpenMP
 * runtime.  DECLDIR marks the symbol for DLL export on Windows. */
#ifdef _MSC_VER
#define DECLDIR __declspec(dllexport)
DECLDIR void set_threads(const unsigned int nthrd){}
#else
#define DECLDIR
#include <omp.h>
// OpenMP set number of threads
DECLDIR void set_threads(const unsigned int nthrd){
omp_set_num_threads(nthrd);
}
#endif
// Flattened array operations
// In-place ReLU over the flattened array: every negative entry of
// data[0..n-1] is clamped to zero; non-negative entries are untouched.
DECLDIR void relu(float * const data, const unsigned long n){
    long k;
#pragma omp parallel for private(k)
    for(k=0; k<n; k++){
        data[k] = (data[k] < 0) ? 0 : data[k];
    }
}
// In-place leaky ReLU: negative entries of data[0..n-1] are scaled by
// the leak factor w; non-negative entries pass through unchanged.
DECLDIR void leakyrelu(float * const data, const unsigned long n, const float w){
    long k;
#pragma omp parallel for private(k)
    for(k=0; k<n; k++){
        if(data[k] < 0.0f){
            data[k] = data[k] * w;
        }
    }
}
// Two-array ReLU mask: wherever inp[i] is non-positive, out[i] is
// zeroed; other out entries keep their current value.  (Presumably the
// backward pass of relu(), masking gradients where the activation was
// clipped -- confirm against the Python caller.)
DECLDIR void relu2(const float * const inp, float * const out, const unsigned long n){
    long k;
#pragma omp parallel for private(k)
    for(k=0; k<n; k++){
        out[k] = (inp[k] <= 0.0f) ? 0.0f : out[k];
    }
}
// Two-array leaky-ReLU mask: wherever inp[i] is non-positive, out[i]
// is scaled by w; other out entries pass through.  (Presumably the
// backward pass of leakyrelu() -- confirm against the Python caller.)
DECLDIR void leakyrelu2(const float * const inp, float * const out, const unsigned long n, const float w){
    long k;
#pragma omp parallel for private(k)
    for(k=0; k<n; k++){
        if(inp[k] <= 0.0f){
            out[k] = out[k] * w;
        }
    }
}
// Scaled accumulate (axpy-style): out[i] += w * inp[i] for i in [0, n).
DECLDIR void combine(const float * const inp, float * const out, const unsigned long n, const float w){
    long k;
#pragma omp parallel for private(k)
    for(k=0; k<n; k++){
        out[k] = out[k] + w * inp[k];
    }
}
// Sum of inp[0..n-1].  Accumulates in long double to limit rounding
// error, then narrows the result to float.
DECLDIR float sum(const float * const inp, const unsigned long n){
    long double acc = 0;
    long k;
#pragma omp parallel for reduction(+:acc) private(k)
    for(k=0; k<n; k++){
        acc += inp[k];
    }
    return (float)acc;
}
DECLDIR float std(const float * const inp, const float mn, const unsigned long n){
long double sum=0;
long i;
#pragma omp parallel for reduction(+:sum) private(i)
for(i=0; i<n; i++){
sum+=(inp[i]-mn)*(inp[i]-mn);
}
return (float)sqrt(sum/n);
}
// Dot product of a and b over n entries, accumulated in long double
// and narrowed to float on return.
DECLDIR float multsum(const float * const a, const float * const b, const unsigned long n){
    long double acc = 0;
    long k;
#pragma omp parallel for reduction(+:acc) private(k)
    for(k=0; k<n; k++){
        acc += a[k] * b[k];
    }
    return (float)acc;
}
DECLDIR void softmax(float * const im, const unsigned long n, const unsigned int nim){
#pragma omp parallel
{
float mx,sm;
long i;
unsigned int j;
#pragma omp for
for(i=0; i<n;i++){
mx=im[i];
for(j=1; j<nim; j++){
if(im[j*n+i]>mx){
mx = im[j*n+i];
}
}
sm=0;
for(j=0; j<nim; j++){
im[j*n+i] = expf(im[j*n+i]-mx);
sm += im[j*n+i];
}
for(j=0; j<nim; j++){
im[j*n+i] /= sm;
}
}
}
}
// Sum of squares of a[0..n-1], returned at full long double precision
// (unlike sum()/multsum(), which narrow to float).
DECLDIR long double squaresum(const float * const a, const unsigned long n){
    long double acc = 0;
    long k;
#pragma omp parallel for reduction(+:acc) private(k)
    for(k=0; k<n; k++){
        acc += a[k] * a[k];
    }
    return acc;
}
// 2D operations
/* 3x3 convolution of one nx-by-ny row-major image with the 9-tap filter
 * f, accumulated into out (out[i*ny+j] += result -- out is NOT cleared).
 * shx/shy are precomputed neighbor-index tables that bake in the
 * boundary handling: shx[2*i] and shx[2*i+1] are the row indices used as
 * the rows "above" and "below" row i, and shy[2*j] / shy[2*j+1] are the
 * in-row column indices used to the "left" and "right" of column j.
 * NOTE(review): the actual boundary rule (reflect vs clamp) is decided
 * by whoever fills shx/shy -- confirm against the Python caller. */
DECLDIR void conv2d(const float * const inp, float * const out, const float * const f, const unsigned int nx, const unsigned int ny, const int * const shx, const int * const shy){
#pragma omp parallel
{
long double tmp;
const float *rl, *rc, *rr;
int i;
#pragma omp for
for(i=0; i<nx; i++){
/* Start of the upper, current and lower source rows for output row i. */
rl = inp + (shx[2*i])*ny;
rc = inp + i*ny;
rr = inp + (shx[2*i+1])*ny;
for(unsigned int j=0; j<ny; j++){
/* Accumulate the nine taps in long double before the final store. */
tmp=0;
tmp += rl[shy[2*j]]*f[0];
tmp += rl[j]*f[1];
tmp += rl[shy[2*j+1]]*f[2];
tmp += rc[shy[2*j]]*f[3];
tmp += rc[j]*f[4];
tmp += rc[shy[2*j+1]]*f[5];
tmp += rr[shy[2*j]]*f[6];
tmp += rr[j]*f[7];
tmp += rr[shy[2*j+1]]*f[8];
out[i*ny+j] += tmp;
}
}
}
}
// Shifted correlation reduction: returns
//   sum over all (i,j) of inp[shx[i]*ny + shy[j]] * delta[i*ny + j]
// for an nx-by-ny row-major pair of images, where shx/shy are
// precomputed shifted row/column index tables (presumably used to
// accumulate one filter-tap gradient -- confirm against the caller).
DECLDIR float gradientmap2d(const float * const inp, const float * const delta, const unsigned int nx, const unsigned int ny, const int * const shx, const int * const shy){
    long double total = 0;
#pragma omp parallel
    {
        const float *src_row, *del_row;
        int r;
#pragma omp for reduction(+:total)
        for(r=0; r<nx; r++){
            src_row = inp + shx[r]*ny;
            del_row = delta + r*ny;
            for(unsigned int c=0; c<ny; c++){
                total += src_row[shy[c]]*del_row[c];
            }
        }
    }
    return (float)total;
}
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/* In-memory form of one <threshold> entry from thresholds.xml: a
 * width x height matrix of dither levels plus its divisor. */
struct _ThresholdMap
{
char
*map_id,        /* name (or alias) the map is looked up by */
*description;   /* human-readable <description> text */
size_t
width,          /* dimensions of the level matrix */
height;
ssize_t
divisor,        /* scale for the levels (per thresholds.xml) -- confirm */
*levels;        /* width*height level values, presumably row-major */
};
/*
Static declarations.
*/
/* Built-in fallback map list, consulted before any thresholds.xml on
 * disk: a 1x1 non-dither threshold and a 2x2 checkerboard, so basic
 * thresholding works even with no configuration files installed. */
static const char
*MinimalThresholdMap =
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
register const Quantum
*magick_restrict p,
*magick_restrict pixels;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
/* p covers the whole width x height neighborhood band for this row;
 * the virtual view supplies the out-of-bounds border pixels. */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Offset (in Quantum units) of the window's center pixel within p. */
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
/* Prime the sliding-window sums for the first window of this row;
 * channel_bias records the window's rightmost column so the slide
 * step below can subtract it. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if (((threshold_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) == 0))
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if (((threshold_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) == 0))
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
/* Slide the window one pixel right: drop the column held in
 * channel_bias, then record the new edge columns.
 * NOTE(review): which column is dropped vs added depends on how p
 * advances below -- verify before touching this arithmetic. */
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image)*(image->columns+1);
}
/* Local mean plus the caller's bias decides the cutoff. */
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that give is set to it maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
/*
Bilevel threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
/* Masked pixels are skipped entirely. */
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
/* Default channels: threshold on the gray intensity; with an
 * explicit channel mask, each channel is tested on its own value. */
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
/* <= threshold goes to 0, everything above to QuantumRange. */
q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_BilevelImage)
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
/* Parse the geometry string into per-channel thresholds: rho alone
 * applies to all color channels; sigma/xi/psi/chi refine green, blue,
 * alpha (and black for CMYK). */
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
/* A trailing '%' scales all thresholds from percent to quantum. */
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
Black threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
/* Below the channel threshold: force to black; at or above: keep. */
if (pixel < GetPixelInfoChannel(&threshold,channel))
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_BlackThresholdImage)
#endif
proceed=SetImageProgress(image,ThresholdImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() set each pixel whose value is below zero to zero and any the
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* PseudoClass images keep their pixels in the colormap, so clamping
 * the colormap entries and re-syncing is sufficient. */
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
/* Clamp every updatable channel into [0, QuantumRange]. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel(q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ClampImage)
#endif
proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocate the given ThresholdMap
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *DestroyThresholdMap(Threshold *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
/* Free a ThresholdMap obtained from GetThresholdMap(): releases the id
 * string, the description, and the level matrix, then the map struct
 * itself.  Returns the relinquished pointer (presumably NULL, per the
 * ImageMagick Relinquish* convention) so callers can reassign. */
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
assert(map != (ThresholdMap *) NULL);
if (map->map_id != (char *) NULL)
map->map_id=DestroyString(map->map_id);
if (map->description != (char *) NULL)
map->description=DestroyString(map->description);
if (map->levels != (ssize_t *) NULL)
map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
map=(ThresholdMap *) RelinquishMagickMemory(map);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Look up a threshold map by name or alias: first in the compiled-in
 * minimal set, then (unless zero-configuration) in every thresholds.xml
 * found on the configure path.  Returns NULL when no map matches. */
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
ExceptionInfo *exception)
{
ThresholdMap
*map;
map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
if (map != (ThresholdMap *) NULL)
return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
{
const StringInfo
*option;
LinkedListInfo
*options;
options=GetConfigureOptions(ThresholdsFilename,exception);
for (option=(const StringInfo *) GetNextValueInLinkedList(options);
option != (const StringInfo *) NULL;
option=(const StringInfo *) GetNextValueInLinkedList(options))
{
map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
GetStringInfoPath(option),map_id,exception);
if (map != (ThresholdMap *) NULL)
break;
}
options=DestroyConfigureOptions(options);
}
#endif
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
% The format of the ListThresholdMaps method is:
%
% ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  /*
    Parse the threshold XML and locate the <threshold> element whose "map"
    or "alias" attribute matches map_id; returns NULL when no match or on
    any validation error (errors are reported via exception).
  */
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /*
    NULL the pointer members first so DestroyThresholdMap() is safe on any
    of the error paths below.
  */
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Reject maps whose width*height product overflows before allocating the
    level matrix.  Passing the element count as the first argument lets
    AcquireQuantumMemory() perform its own count*quantum overflow check;
    the previous form (height*sizeof(*levels) as the quantum) left that
    product unchecked.
  */
  if (map->width > (((size_t) -1)/map->height))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory(map->width*map->height,
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /*
    Parse exactly width*height whitespace-separated integer levels from the
    element content; each must lie in [0, divisor].
  */
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    A trailing parsable integer means the map supplied more values than
    width*height allows.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias_attribute,
    *description_text,
    *map_attribute;

  XMLTreeInfo
    *description_node,
    *node,
    *tree;

  /*
    Emit one table row per <threshold> element found in the XML data.
  */
  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  tree=NewXMLTree(xml,exception);
  if (tree == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  node=GetXMLTreeChild(tree,"threshold");
  while (node != (XMLTreeInfo *) NULL)
  {
    map_attribute=GetXMLTreeAttribute(node,"map");
    if (map_attribute == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        tree=DestroyXMLTree(tree);
        return(MagickFalse);
      }
    alias_attribute=GetXMLTreeAttribute(node,"alias");
    description_node=GetXMLTreeChild(node,"description");
    if (description_node == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map_attribute);
        tree=DestroyXMLTree(tree);
        return(MagickFalse);
      }
    description_text=GetXMLTreeContent(description_node);
    if (description_text == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map_attribute);
        tree=DestroyXMLTree(tree);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map_attribute,
      alias_attribute ? alias_attribute : "",description_text);
    node=GetNextXMLTreeTag(node);
  }
  tree=DestroyXMLTree(tree);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *xml;

  LinkedListInfo
    *configure_options;

  MagickStatusType
    status;

  /*
    List every threshold map defined by the configured XML files; a
    per-file failure clears status but does not stop the listing.
  */
  if (file == (FILE *) NULL)
    file=stdout;
  status=MagickTrue;
  configure_options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (xml=(const StringInfo *) GetNextValueInLinkedList(configure_options);
       xml != (const StringInfo *) NULL;
       xml=(const StringInfo *) GetNextValueInLinkedList(configure_options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(xml));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(xml),
      GetStringInfoPath(xml),exception);
  }
  configure_options=DestroyConfigureOptions(configure_options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() performs an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  /*
    Extract the map name: skip leading whitespace/commas, then copy the
    token up to the next whitespace, comma, or buffer limit.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse the optional per-channel level counts; the default of 2 yields a
    binary (bitmap) dither for every channel.  A single number applies to
    all channels; additional numbers override per channel in turn.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      /*
        Fix: the threshold map leaked here when the storage class could
        not be set.
      */
      map=DestroyThresholdMap(map);
      return(MagickFalse);
    }
  /*
    Dither each row: the map level at (x mod width, y mod height) decides
    whether the quantized channel value is rounded up or down.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;
            continue;
          }
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OrderedDitherImage)
#endif
        proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    Fix: report the accumulated row status instead of unconditionally
    returning MagickTrue (cache sync or progress failures were silently
    swallowed before).
  */
  return(status != MagickFalse ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    direction;

  /*
    Clamp |quantum| up to epsilon while preserving the value's sign;
    values whose magnitude already meets epsilon pass through unchanged.
  */
  direction=1.0;
  if ((double) quantum < 0.0)
    direction=(-1.0);
  if ((direction*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (direction*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Raise each channel value whose magnitude is below epsilon up to
    +/-epsilon via PerceptibleThreshold(); larger values are unchanged.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        PseudoClass: thresholding the colormap entries suffices;
        SyncImage() then pushes the updated colormap to the pixel cache.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* Cannot break out of an OpenMP for loop: a prior row failure makes
       the remaining iterations no-ops instead. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* A zero write mask means this pixel is protected: skip it. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImage)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o min_threshold,max_threshold: Specify the low and high thresholds.
% These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Threshold each updatable channel against a per-pixel random value
    clamped to [min_threshold, max_threshold]; the result is two-valued
    (0 or QuantumRange) per channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): this PixelInfo is initialized but never read -- the
     per-channel double 'threshold' declared inside the loop shadows it;
     confirm it can be removed. */
  GetPixelInfo(image,&threshold);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  /* One RandomInfo per thread so pseudo-random streams are not shared. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Parallelize only when key == ~0UL; presumably this detects the
     default (unseeded) secret key so seeded runs stay reproducible --
     TODO confirm against GetRandomSecretKey() semantics. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* A zero write mask means this pixel is protected: skip it. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Clamp the threshold into [min_threshold, max_threshold]; values
           outside the band compare against the nearer bound, so they are
           thresholded deterministically. */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Force every channel value above its threshold to QuantumRange (white);
    values at or below the threshold are left unchanged.  The thresholds
    string is parsed as geometry: rho[,sigma[,xi[,psi[,chi]]]] mapping to
    red, green, blue, alpha (or black/alpha for CMYK), optionally as
    percentages of QuantumRange.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): gray images are converted to sRGB before thresholding;
     confirm this is the intended direction (it widens a gray image to
     full RGB so per-channel thresholds apply). */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  /* A single rho value seeds every channel; later flags override. */
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  /* CMYK: psi is the black channel and chi becomes alpha instead. */
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  /* Percent syntax scales every threshold from [0,100] to quantum range. */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* A zero write mask means this pixel is protected: skip it. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /* Default channel mask: compare intensity against each channel's
         threshold; restricted mask: compare the channel value itself. */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
module_bl_mynn_bl_init_driver_impl.h |
#ifndef __MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_H__
#define __MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_H__
// File granularity version.
#ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MAJOR
#define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MAJOR 1
#endif
#ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MINOR
#define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MINOR 0
#endif
#ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_PATCH_VERSION
#define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_PATCH_VERSION 0
#endif
#ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_CREATE_DATE
#define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_CREATE_DATE "Date: 30-10-2016 , Time: 14:39 PM GMT+2"
#endif
// Set this value to successful build date/time.
#ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_BUILD_DATE
#define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_BUILD_DATE ""
#endif
#ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_AUTHOR
#define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_AUTHOR "Name: Bernard Gingold , e-mail: beniekg@gmail.com"
#endif
#include "module_bl_mynn_F90_iface.h"
#include "PhysLib_Config.h"
#include "std_headers.h"
namespace phys_lib_wrappers {
namespace module_bl_mynn {
template<typename R32 = float,
typename I32 = int > struct Wrap_Mynn_Bl_Init_Driver {
/*******************************************
Constructors and Destructor.
********************************************/
/*
@Purpose:
Default Constructor - explicitly default.
NOTE(review): a defaulted constructor leaves the scalar index
members and the nine 3D array pointers indeterminate; a
default-constructed wrapper must not be used until assigned --
confirm the destructor guards against freeing indeterminate
pointers.
*/
Wrap_Mynn_Bl_Init_Driver() = default;
/*
@Purpose:
1st 'main' Constructor which purpose
is to allocate and initialize scalar
and array members. Array members are
zero-filled. Caller must later initialize
input arrays to correct physical state.
*/
Wrap_Mynn_Bl_Init_Driver(_In_ const I32 IDS,
                         _In_ const I32 IDE,
                         _In_ const I32 JDS,
                         _In_ const I32 JDE,
                         _In_ const I32 KDS,
                         _In_ const I32 KDE,
                         _In_ const I32 IMS,
                         _In_ const I32 IME,
                         _In_ const I32 JMS,
                         _In_ const I32 JME,
                         _In_ const I32 KMS,
                         _In_ const I32 KME,
                         _In_ const I32 ITS,
                         _In_ const I32 ITE,
                         _In_ const I32 JTS,
                         _In_ const I32 JTE,
                         _In_ const I32 KTS,
                         _In_ const I32 KTE,
                         _In_ const I32 ALLOWED_TO_READ,
                         _In_ const I32 RESTART,
                         _In_ const I32 LEVEL)
:
m_IDS{ IDS },
m_IDE{ IDE },
m_JDS{ JDS },
m_JDE{ JDE },
m_KDS{ KDS },
m_KDE{ KDE },
m_IMS{ IMS },
m_IME{ IME },
m_JMS{ JMS },
m_JME{ JME },
m_KMS{ KMS },
m_KME{ KME },
m_ITS{ ITS },
m_ITE{ ITE },
m_JTS{ JTS },
m_JTE{ JTE },
m_KTS{ KTS },
m_KTE{ KTE },
m_ALLOWED_TO_READ{ ALLOWED_TO_READ },
m_RESTART{ RESTART },
m_LEVEL{ LEVEL },
m_RUBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RTHBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQCBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQIBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_QKE{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_TKE_PBL{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_EXCH_H{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) } {
    // Purpose: 1st 'main' Ctor -- allocates the nine (IME x KME x JME)
    // output arrays and zero-fills them; the caller later initializes
    // the physical state.
    //
    // Collect the nine 3D array members in an explicit table so both
    // the allocation check and the zero-fill address them directly.
    // (The previous (&this->m_RUBLTEN)[i] idiom relied on cross-member
    // pointer arithmetic, which is undefined behavior.)
    R32* arrays3d[9] = { this->m_RUBLTEN, this->m_RVBLTEN, this->m_RTHBLTEN,
                         this->m_RQVBLTEN, this->m_RQCBLTEN, this->m_RQIBLTEN,
                         this->m_QKE, this->m_TKE_PBL, this->m_EXCH_H };
    // Check for memory allocation errors i.e. (malloc failures).
    for (int i{ 0 }; i != 9; ++i) {
        if (arrays3d[i] == NULL) {
            std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 1st Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n";
            // Fix: __FUNCTIONW__ is a wide string -- streaming it to the
            // narrow std::cerr printed a pointer, not the function name.
            std::cerr << "at " << __FILE__ << ":" << __LINE__ << "\n";
            std::cerr << "***** ERROR-DETAILS ***** \n";
            std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << arrays3d[i] << "\n";
            std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
            std::exit(-1);
        }
    }
    // Zero-fill every element of every 3D array.
    //
    // Fixes two defects in the previous initialization:
    //  1) the linear index i + m_IME*k + m_KME*j is not a valid
    //     (IME x KME x JME) layout (the j stride must be m_IME*m_KME),
    //     so parts of each allocation were left uninitialized;
    //  2) the loop-blocking fallback iterated ii/kk/jj only while
    //     strictly less than DEFAULT_BLOCK_SIZE (absolute, not relative
    //     to the block origin), skipping nearly the whole array.
    // Filling the entire allocation honors the documented contract that
    // the arrays are zero-filled, regardless of layout convention.
    const long long total = static_cast<long long>(m_IME) *
                            static_cast<long long>(m_KME) *
                            static_cast<long long>(m_JME);
    for (int a{ 0 }; a != 9; ++a) {
        R32* __restrict p = arrays3d[a];
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#endif
        for (long long idx = 0; idx != total; ++idx) {
            p[idx] = R32(0);
        }
    }
}
/*
@Purpose:
2nd 'main' Constructor which purpose
is to allocate and initialize scalar
and array members. Array output members are
zero-filled. Caller must pass initialized
input arrays to correct physical state.
*/
Wrap_Mynn_Bl_Init_Driver(_In_ const I32 IDS,
_In_ const I32 IDE,
_In_ const I32 JDS,
_In_ const I32 JDE,
_In_ const I32 KDS,
_In_ const I32 KDE,
_In_ const I32 IMS,
_In_ const I32 IME,
_In_ const I32 JMS,
_In_ const I32 JME,
_In_ const I32 KMS,
_In_ const I32 KME,
_In_ const I32 ITS,
_In_ const I32 ITE,
_In_ const I32 JTS,
_In_ const I32 JTE,
_In_ const I32 KTS,
_In_ const I32 KTE,
_In_ const I32 ALLOWED_TO_READ,
_In_ const I32 RESTART,
_In_ const I32 LEVEL,
_In_ R32* __restrict const RUBLTEN,
_In_ R32* __restrict const RVBLTEN,
_In_ R32* __restrict const RTHBLTEN,
_In_ R32* __restrict const RQVBLTEN,
_In_ R32* __restrict const RQCBLTEN,
_In_ R32* __restrict const RQIBLTEN,
_In_ R32* __restrict const QKE,
_In_ R32* __restrict const TKE_PBL,
_In_ R32* __restrict const EXCH_H)
:
m_IDS{ IDS },
m_IDE{ IDE },
m_JDS{ JDS },
m_JDE{ JDE },
m_KDS{ KDS },
m_KDE{ KDE },
m_IMS{ IMS },
m_IME{ IME },
m_JMS{ JMS },
m_JME{ JME },
m_KMS{ KMS },
m_KME{ KME },
m_ITS{ ITS },
m_ITE{ ITE },
m_JTS{ JTS },
m_JTE{ JTE },
m_KTS{ KTS },
m_KTE{ KTE },
m_ALLOWED_TO_READ{ ALLOWED_TO_READ },
m_RESTART{ RESTART },
m_LEVEL{ LEVEL },
m_RUBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RTHBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQCBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQIBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_QKE{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_TKE_PBL{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_EXCH_H{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) } {
// Check for occurrence of memory allocation errors i.e. (malloc failures).
for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
if ((&this->m_RUBLTEN)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_RUBLTEN)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
// Check for null pointers in call input arrays.
if (RUBLTEN == NULL ||
RVBLTEN == NULL ||
RTHBLTEN == NULL ||
RQVBLTEN == NULL ||
RQCBLTEN == NULL ||
RQIBLTEN == NULL ||
QKE == NULL ||
TKE_PBL == NULL ||
EXCH_H == NULL ) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "One or more caller's arrays contains invalid pointer!!\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
// Copy caller's input arrays.
// Using OpenMP for outermost loop
// and vectorization for innermost loop.
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20))
for (int i = m_IMS; i != m_IME; ++i) {
for(int k = m_KMS; k != m_KME; ++k) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
for(int j = m_JMS; j != m_JME; ++j) {
this->m_RUBLTEN[i + m_IME * k + m_KME * j] = RUBLTEN[i + m_IME * k + m_KME * j];
this->m_RVBLTEN[i + m_IME * k + m_KME * j] = RVBLTEN[i + m_IME * k + m_KME * j];
this->m_RTHBLTEN[i + m_IME * k + m_KME * j] = RTHBLTEN[i + m_IME * k + m_KME * j];
this->m_RQVBLTEN[i + m_IME * k + m_KME * j] = RQVBLTEN[i + m_IME * k + m_KME * j];
this->m_RQCBLTEN[i + m_IME * k + m_KME * j] = RQCBLTEN[i + m_IME * k + m_KME * j];
this->m_RQIBLTEN[i + m_IME * k + m_KME * j] = RQIBLTEN[i + m_IME * k + m_KME * j];
this->m_QKE[i + m_IME * k + m_KME * j] = QKE[i + m_IME * k + m_KME * j];
this->m_TKE_PBL[i + m_IME * k + m_KME * j] = TKE_PBL[i + m_IME * k + m_KME * j];
this->m_EXCH_H[i + m_IME * k + m_KME * j] = EXCH_H[i + m_IME * k + m_KME * j];
}
}
}
#else
// Use loop blocking.
// Warning: You must not #undef 'USE_LOOP_BLOCKING' macro!!
for (int i = m_IMS; i != m_IME; i += DEFAULT_BLOCK_SIZE) {
for (int k = m_KMS; k != m_KME; k += DEFAULT_BLOCK_SIZE) {
for (int j = m_JMS; j != m_JME; j += DEFAULT_BLOCK_SIZE) {
for (int ii = i; ii < DEFAULT_BLOCK_SIZE; ++ii) {
for (int kk = k; kk < DEFAULT_BLOCK_SIZE; ++kk) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#endif
for (int jj = j; jj < DEFAULT_BLOCK_SIZE; ++jj) {
this->m_RUBLTEN[ii + m_IME * kk + m_KME * jj] = RUBLTEN[ii + m_IME * kk + m_KME * jj];
this->m_RVBLTEN[ii + m_IME * kk + m_KME * jj] = RVBLTEN[ii + m_IME * kk + m_KME * jj];
this->m_RTHBLTEN[ii + m_IME * kk + m_KME * jj] = RTHBLTEN[ii + m_IME * kk + m_KME * jj];
this->m_RQVBLTEN[ii + m_IME * kk + m_KME * jj] = RQVBLTEN[ii + m_IME * kk + m_KME * jj];
this->m_RQCBLTEN[ii + m_IME * kk + m_KME * jj] = RQCBLTEN[ii + m_IME * kk + m_KME * jj];
this->m_RQIBLTEN[ii + m_IME * kk + m_KME * jj] = RQIBLTEN[ii + m_IME * kk + m_KME * jj];
this->m_QKE[ii + m_IME * kk + m_KME * jj] = QKE[ii + m_IME * kk + m_KME * jj];
this->m_TKE_PBL[ii + m_IME * kk + m_KME * jj] = TKE_PBL[ii + m_IME * kk + m_KME * jj];
this->m_EXCH_H[ii + m_IME * kk + m_KME * jj] = EXCH_H[ii + m_IME * kk + m_KME * jj];
}
}
}
}
}
}
#endif
}
/*
@Purpose:
Copy Constructor implements deep copy semantics.
*/
/*
@Purpose:
Copy Constructor implements deep copy semantics: allocates fresh
32-byte-aligned buffers sized from x's memory extents, then copies
x's nine 3D arrays element by element. Exits on allocation failure.
*/
Wrap_Mynn_Bl_Init_Driver(_In_ const Wrap_Mynn_Bl_Init_Driver &x)
:
m_IDS{ x.m_IDS },
m_IDE{ x.m_IDE },
m_JDS{ x.m_JDS },
m_JDE{ x.m_JDE },
m_KDS{ x.m_KDS },
m_KDE{ x.m_KDE },
m_IMS{ x.m_IMS },
m_IME{ x.m_IME },
m_JMS{ x.m_JMS },
m_JME{ x.m_JME },
m_KMS{ x.m_KMS },
m_KME{ x.m_KME },
m_ITS{ x.m_ITS },
m_ITE{ x.m_ITE },
m_JTS{ x.m_JTS },
m_JTE{ x.m_JTE },
m_KTS{ x.m_KTS },
m_KTE{ x.m_KTE },
m_ALLOWED_TO_READ{ x.m_ALLOWED_TO_READ },
m_RESTART{ x.m_RESTART },
m_LEVEL{ x.m_LEVEL },
m_RUBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RTHBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQCBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_RQIBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_QKE{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_TKE_PBL{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) },
m_EXCH_H{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) } {
    // Check for memory allocation errors i.e. (malloc failures).
    // NOTE(review): walks the nine pointer members as a contiguous array
    // via (&m_RUBLTEN)[i]; relies on their declaration order -- confirm.
    for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
        if ((&this->m_RUBLTEN)[i] == NULL) {
            std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n";
            std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
            std::cerr << "***** ERROR-DETAILS ***** \n";
            std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_RUBLTEN)[i] << "\n";
            std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
            std::exit(-1);
        }
    }
    // Deep-copy x's arrays. Dimensions were copied above, so the same
    // linear index addresses both source and destination.
    // BUGFIX: '<' loop conditions (OpenMP canonical form) and the proper
    // 3D linear index i + m_IME * (k + m_KME * j); the old form collided
    // and skipped most of each buffer.
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
    #pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20))
    for (int i = m_IMS; i < m_IME; ++i) {
        for (int k = m_KMS; k < m_KME; ++k) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
            for (int j = m_JMS; j < m_JME; ++j) {
                const int idx = i + m_IME * (k + m_KME * j);
                this->m_RUBLTEN[idx] = x.m_RUBLTEN[idx];
                this->m_RVBLTEN[idx] = x.m_RVBLTEN[idx];
                this->m_RTHBLTEN[idx] = x.m_RTHBLTEN[idx];
                this->m_RQVBLTEN[idx] = x.m_RQVBLTEN[idx];
                this->m_RQCBLTEN[idx] = x.m_RQCBLTEN[idx];
                this->m_RQIBLTEN[idx] = x.m_RQIBLTEN[idx];
                this->m_QKE[idx] = x.m_QKE[idx];
                this->m_TKE_PBL[idx] = x.m_TKE_PBL[idx];
                this->m_EXCH_H[idx] = x.m_EXCH_H[idx];
            }
        }
    }
#else
    // Loop-blocked variant.
    // BUGFIX: block bounds are clamped per block; the old 'ii < DEFAULT_BLOCK_SIZE'
    // test only ever processed the first block.
    for (int i = m_IMS; i < m_IME; i += DEFAULT_BLOCK_SIZE) {
        const int i_hi = (i + DEFAULT_BLOCK_SIZE < m_IME) ? (i + DEFAULT_BLOCK_SIZE) : m_IME;
        for (int k = m_KMS; k < m_KME; k += DEFAULT_BLOCK_SIZE) {
            const int k_hi = (k + DEFAULT_BLOCK_SIZE < m_KME) ? (k + DEFAULT_BLOCK_SIZE) : m_KME;
            for (int j = m_JMS; j < m_JME; j += DEFAULT_BLOCK_SIZE) {
                const int j_hi = (j + DEFAULT_BLOCK_SIZE < m_JME) ? (j + DEFAULT_BLOCK_SIZE) : m_JME;
                for (int ii = i; ii < i_hi; ++ii) {
                    for (int kk = k; kk < k_hi; ++kk) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#endif
                        for (int jj = j; jj < j_hi; ++jj) {
                            const int idx = ii + m_IME * (kk + m_KME * jj);
                            this->m_RUBLTEN[idx] = x.m_RUBLTEN[idx];
                            this->m_RVBLTEN[idx] = x.m_RVBLTEN[idx];
                            this->m_RTHBLTEN[idx] = x.m_RTHBLTEN[idx];
                            this->m_RQVBLTEN[idx] = x.m_RQVBLTEN[idx];
                            this->m_RQCBLTEN[idx] = x.m_RQCBLTEN[idx];
                            this->m_RQIBLTEN[idx] = x.m_RQIBLTEN[idx];
                            this->m_QKE[idx] = x.m_QKE[idx];
                            this->m_TKE_PBL[idx] = x.m_TKE_PBL[idx];
                            this->m_EXCH_H[idx] = x.m_EXCH_H[idx];
                        }
                    }
                }
            }
        }
    }
#endif
}
/*
@Purpose:
Move Constructor implements shallow copy semantics.
*/
/*
@Purpose:
Move Constructor implements shallow copy semantics: steals x's
nine array pointers and leaves x empty (null pointers, zeroed
memory extents) so x's destructor is a no-op for the arrays.
*/
Wrap_Mynn_Bl_Init_Driver(_In_ Wrap_Mynn_Bl_Init_Driver &&x)
:
m_IDS{ x.m_IDS },
m_IDE{ x.m_IDE },
m_JDS{ x.m_JDS },
m_JDE{ x.m_JDE },
m_KDS{ x.m_KDS },
m_KDE{ x.m_KDE },
m_IMS{ x.m_IMS },
m_IME{ x.m_IME },
m_JMS{ x.m_JMS },
m_JME{ x.m_JME },
m_KMS{ x.m_KMS },
m_KME{ x.m_KME },
m_ITS{ x.m_ITS },
m_ITE{ x.m_ITE },
m_JTS{ x.m_JTS },
m_JTE{ x.m_JTE },
m_KTS{ x.m_KTS },
m_KTE{ x.m_KTE },
m_ALLOWED_TO_READ{ x.m_ALLOWED_TO_READ },
m_RESTART{ x.m_RESTART },
m_LEVEL{ x.m_LEVEL } {
    // BUGFIX: steal every pointer unconditionally, then null x's copy.
    // The old code only assigned non-null source pointers, which left the
    // corresponding members of *this uninitialized (garbage) and later
    // handed them to _mm_free in the destructor -- undefined behavior.
    // NOTE(review): walks the nine pointer members as a contiguous array
    // via (&m_RUBLTEN)[i]; relies on their declaration order -- confirm.
    for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
        (&this->m_RUBLTEN)[i] = (&x.m_RUBLTEN)[i];
        (&x.m_RUBLTEN)[i] = NULL;
    }
    // Zero x's memory extents so it reads as an empty object.
    x.m_IMS = 0;
    x.m_IME = 0;
    x.m_KMS = 0;
    x.m_KME = 0;
    x.m_JMS = 0;
    x.m_JME = 0;
}
/*
@Purpose:
Class Destructor.
*/
/*
@Purpose:
Class Destructor: releases the nine 3D arrays (allocated with
_mm_malloc, hence released with _mm_free), nulls the pointers to
guard against accidental double-free, and zeroes the memory extents.
NOTE(review): walks the nine pointer members as a contiguous array
via (&m_RUBLTEN)[i]; this relies on their declaration order and
packing -- confirm before reordering the fields.
*/
~Wrap_Mynn_Bl_Init_Driver() {
// Free each non-null array (skipping NULL is redundant for free-like
// calls but harmless; kept as-is).
for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
if ((&this->m_RUBLTEN)[i]) {
_mm_free((&this->m_RUBLTEN)[i]);
}
}
// Null the pointers so a stale reference cannot double-free.
for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
(&this->m_RUBLTEN)[i] = NULL;
}
// Zero the memory extents: the object now reads as empty.
this->m_IMS = 0;
this->m_IME = 0;
this->m_JMS = 0;
this->m_JME = 0;
this->m_KMS = 0;
this->m_KME = 0;
}
/*
@Purpose:
Copy-assign Operator implements deep copy semantics.
*/
/*
@Purpose:
Copy-assign Operator implements deep copy semantics. Temporary
buffers are fully allocated and filled before the old buffers are
released, so *this is never left holding partially-copied data.
Exits the process on allocation failure.
*/
Wrap_Mynn_Bl_Init_Driver & operator=(_In_ const Wrap_Mynn_Bl_Init_Driver &x) {
    if (this == &x) return (*this);
    // Copy scalar state first; the new m_IME/m_KME/m_JME extents are
    // needed below to size the temporary allocations.
    this->m_IDS = x.m_IDS;
    this->m_IDE = x.m_IDE;
    this->m_JDS = x.m_JDS;
    this->m_JDE = x.m_JDE;
    this->m_KDS = x.m_KDS;
    this->m_KDE = x.m_KDE;
    this->m_IMS = x.m_IMS;
    this->m_IME = x.m_IME;
    this->m_JMS = x.m_JMS;
    this->m_JME = x.m_JME;
    this->m_KMS = x.m_KMS;
    this->m_KME = x.m_KME;
    this->m_ITS = x.m_ITS;
    this->m_ITE = x.m_ITE;
    this->m_JTS = x.m_JTS;
    this->m_JTE = x.m_JTE;
    this->m_KTS = x.m_KTS;
    this->m_KTE = x.m_KTE;
    this->m_ALLOWED_TO_READ = x.m_ALLOWED_TO_READ;
    this->m_RESTART = x.m_RESTART;
    this->m_LEVEL = x.m_LEVEL;
    // Allocate temporaries so the current buffers stay intact until the
    // copy has fully succeeded.
    constexpr int ntPtrs3D = 9;
    R32 *tPtrs3D[ntPtrs3D] = {};
    for (int i{ 0 }; i != ntPtrs3D; ++i) {
        tPtrs3D[i] = reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)),align32B));
    }
    // Check for memory allocation errors.
    for (int i{ 0 }; i != ntPtrs3D; ++i) {
        if (tPtrs3D[i] == NULL) {
            std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy Operator: 'Wrap_Mynn_Bl_Init_Driver'!!\n";
            std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
            std::cerr << "***** ERROR-DETAILS ***** \n";
            // BUGFIX: repaired the broken "\.n" escape in the message below.
            std::cerr << "Checking allocation of temporary arrays 3D.\n";
            std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << tPtrs3D[i] << "\n";
            std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
            // BUGFIX: exit was missing -- execution fell through and
            // dereferenced the NULL temporary in the copy loops below.
            std::exit(-1);
        }
    }
    // Copy x's arrays into the temporaries.
    // BUGFIX: '<' loop conditions (OpenMP canonical form) and the proper
    // 3D linear index i + m_IME * (k + m_KME * j); the old form collided
    // and skipped most of each buffer.
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
    #pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20))
    for (int i = m_IMS; i < m_IME; ++i) {
        for (int k = m_KMS; k < m_KME; ++k) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
            for (int j = m_JMS; j < m_JME; ++j) {
                const int idx = i + m_IME * (k + m_KME * j);
                tPtrs3D[0][idx] = x.m_RUBLTEN[idx];
                tPtrs3D[1][idx] = x.m_RVBLTEN[idx];
                tPtrs3D[2][idx] = x.m_RTHBLTEN[idx];
                tPtrs3D[3][idx] = x.m_RQVBLTEN[idx];
                tPtrs3D[4][idx] = x.m_RQCBLTEN[idx];
                tPtrs3D[5][idx] = x.m_RQIBLTEN[idx];
                tPtrs3D[6][idx] = x.m_QKE[idx];
                tPtrs3D[7][idx] = x.m_TKE_PBL[idx];
                tPtrs3D[8][idx] = x.m_EXCH_H[idx];
            }
        }
    }
#else
    // Loop-blocked variant.
    // BUGFIX: block bounds are clamped per block; the old
    // 'ii < DEFAULT_BLOCK_SIZE' test only ever processed the first block.
    for (int i = m_IMS; i < m_IME; i += DEFAULT_BLOCK_SIZE) {
        const int i_hi = (i + DEFAULT_BLOCK_SIZE < m_IME) ? (i + DEFAULT_BLOCK_SIZE) : m_IME;
        for (int k = m_KMS; k < m_KME; k += DEFAULT_BLOCK_SIZE) {
            const int k_hi = (k + DEFAULT_BLOCK_SIZE < m_KME) ? (k + DEFAULT_BLOCK_SIZE) : m_KME;
            for (int j = m_JMS; j < m_JME; j += DEFAULT_BLOCK_SIZE) {
                const int j_hi = (j + DEFAULT_BLOCK_SIZE < m_JME) ? (j + DEFAULT_BLOCK_SIZE) : m_JME;
                for (int ii = i; ii < i_hi; ++ii) {
                    for (int kk = k; kk < k_hi; ++kk) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#endif
                        for (int jj = j; jj < j_hi; ++jj) {
                            const int idx = ii + m_IME * (kk + m_KME * jj);
                            tPtrs3D[0][idx] = x.m_RUBLTEN[idx];
                            tPtrs3D[1][idx] = x.m_RVBLTEN[idx];
                            tPtrs3D[2][idx] = x.m_RTHBLTEN[idx];
                            tPtrs3D[3][idx] = x.m_RQVBLTEN[idx];
                            tPtrs3D[4][idx] = x.m_RQCBLTEN[idx];
                            tPtrs3D[5][idx] = x.m_RQIBLTEN[idx];
                            tPtrs3D[6][idx] = x.m_QKE[idx];
                            tPtrs3D[7][idx] = x.m_TKE_PBL[idx];
                            tPtrs3D[8][idx] = x.m_EXCH_H[idx];
                        }
                    }
                }
            }
        }
    }
#endif
    // Deallocate current context of *this (common tail; the old code
    // duplicated this block in both preprocessor branches).
    for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
        _mm_free((&this->m_RUBLTEN)[i]);
    }
    // Reassign temporary pointers to member pointers.
    this->m_RUBLTEN = tPtrs3D[0];
    this->m_RVBLTEN = tPtrs3D[1];
    this->m_RTHBLTEN = tPtrs3D[2];
    this->m_RQVBLTEN = tPtrs3D[3];
    this->m_RQCBLTEN = tPtrs3D[4];
    this->m_RQIBLTEN = tPtrs3D[5];
    this->m_QKE = tPtrs3D[6];
    this->m_TKE_PBL = tPtrs3D[7];
    this->m_EXCH_H = tPtrs3D[8];
    return (*this);
}
/*
@Purpose:
Move-assign Operator implements shallow copy semantics.
*/
/*
@Purpose:
Move-assign Operator implements shallow copy semantics: frees this
object's current arrays, steals x's pointers, and leaves x empty
(null pointers, zeroed memory extents).
*/
Wrap_Mynn_Bl_Init_Driver & operator=(_In_ Wrap_Mynn_Bl_Init_Driver &&x) {
    if (this == &x) return (*this);
    this->m_IDS = x.m_IDS;
    this->m_IDE = x.m_IDE;
    this->m_JDS = x.m_JDS;
    this->m_JDE = x.m_JDE;
    this->m_KDS = x.m_KDS;
    this->m_KDE = x.m_KDE;
    this->m_IMS = x.m_IMS;
    this->m_IME = x.m_IME;
    this->m_JMS = x.m_JMS;
    this->m_JME = x.m_JME;
    this->m_KMS = x.m_KMS;
    this->m_KME = x.m_KME;
    this->m_ITS = x.m_ITS;
    this->m_ITE = x.m_ITE;
    this->m_JTS = x.m_JTS;
    this->m_JTE = x.m_JTE;
    this->m_KTS = x.m_KTS;
    this->m_KTE = x.m_KTE;
    this->m_ALLOWED_TO_READ = x.m_ALLOWED_TO_READ;
    this->m_RESTART = x.m_RESTART;
    this->m_LEVEL = x.m_LEVEL;
    // Deallocate current context before taking ownership of x's buffers.
    for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
        if ((&this->m_RUBLTEN)[i]) {
            _mm_free((&this->m_RUBLTEN)[i]);
        }
    }
    // Steal x's pointers, then null x's copies.
    // BUGFIX: both loops previously used (x.m_RUBLTEN)[i] -- indexing into
    // the float data instead of the pointer members; the missing '&' made
    // the steal/nullify operate on array elements, not on the pointers.
    for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
        (&this->m_RUBLTEN)[i] = (&x.m_RUBLTEN)[i];
        (&x.m_RUBLTEN)[i] = NULL;
    }
    // Zero x's memory extents so it reads as an empty object.
    x.m_IMS = 0;
    x.m_IME = 0;
    x.m_KMS = 0;
    x.m_KME = 0;
    x.m_JMS = 0;
    x.m_JME = 0;
    return (*this);
}
/*
@Purpose:
Call Fortran 90 'MYNN_BL_INIT_DRIVER' subroutine.
*/
/*
@Purpose:
Call Fortran 90 'MYNN_BL_INIT_DRIVER' subroutine, passing the nine
3D arrays followed by the scalar flags and the domain/memory/tile
bounds (all by reference, as Fortran expects).
*/
void Call_Mynn_Bl_Init_Driver() {
    // BUGFIX: the second argument previously passed m_RUBLTEN again,
    // so m_RVBLTEN was never handed to the Fortran routine.
    MODULE_BL_MYNN_mp_MYNN_BL_INIT_DRIVER(&this->m_RUBLTEN[0], &this->m_RVBLTEN[0], &this->m_RTHBLTEN[0],
                                          &this->m_RQVBLTEN[0], &this->m_RQCBLTEN[0], &this->m_RQIBLTEN[0],
                                          &this->m_QKE[0], &this->m_TKE_PBL[0], &this->m_EXCH_H[0],
                                          &this->m_RESTART, &this->m_ALLOWED_TO_READ, &this->m_LEVEL,
                                          &this->m_IDS, &this->m_IDE, &this->m_JDS, &this->m_JDE, &this->m_KDS, &this->m_KDE,
                                          &this->m_IMS, &this->m_IME, &this->m_JMS, &this->m_JME, &this->m_KMS, &this->m_KME,
                                          &this->m_ITS, &this->m_ITE, &this->m_JTS, &this->m_JTE, &this->m_KTS, &this->m_KTE);
}
/*
@Purpose:
Member variables.
*/
// Memory and patch dimension variables.
// Naming: IDS/IDE etc. are Fortran-style inclusive start/end bounds;
// the middle letter selects the axis (I/J/K) and the suffix group
// selects the extent kind: D = domain, M = memory (allocated), T = tile.
I32 m_IDS;
I32 m_IDE;
I32 m_JDS;
I32 m_JDE;
I32 m_KDS;
I32 m_KDE;
I32 m_IMS;
I32 m_IME;
I32 m_JMS;
I32 m_JME;
I32 m_KMS;
I32 m_KME;
I32 m_ITS;
I32 m_ITE;
I32 m_JTS;
I32 m_JTE;
I32 m_KTS;
I32 m_KTE;
// Scalar flags forwarded to the Fortran driver (integer-valued booleans).
I32 m_ALLOWED_TO_READ;
I32 m_RESTART;
I32 m_LEVEL;
// Array variables.
// Nine owned 3D fields, each allocated with m_IME * m_KME * m_JME
// elements via _mm_malloc (32-byte aligned) and released with _mm_free.
// NOTE(review): several members iterate these nine pointers as a
// contiguous array via (&m_RUBLTEN)[i]; that relies on their declaration
// order and padding -- confirm before reordering or inserting fields here.
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RUBLTEN;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RVBLTEN;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RTHBLTEN;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RQVBLTEN;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RQCBLTEN;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RQIBLTEN;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_QKE;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_TKE_PBL;
_Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_EXCH_H;
// Number of 3D array members above (used by the pointer-walking loops).
static const int m_nArrays3D = 9;
};
}
}
#endif /*__MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_H__*/ |
12_soma_par1.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/*
 * Allocates *v with `size` elements and fills every element with 1.
 * Ownership of the buffer transfers to the caller (free with free()).
 * BUGFIX: the malloc result was previously used unchecked; on failure
 * the fill loop would dereference NULL. Now reports and exits.
 */
void inicializa(unsigned long **v, int size) {
    *v = malloc(sizeof(unsigned long) * (size_t) size);
    if (*v == NULL) {
        fprintf(stderr, "inicializa: malloc of %d elements failed\n", size);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < size; i++) {
        (*v)[i] = 1;
    }
}
/*
 * Parallel sum of a vector of 1s: each thread accumulates a contiguous
 * chunk into its own slot of local_acc (no races), then a single thread
 * reduces the partial sums. Prints the total and returns 0.
 */
int main(int argc, char **argv) {
    unsigned long *vetor;
    int size = 1000000;
    inicializa(&vetor, size);
    unsigned long acc = 0;
    unsigned long *local_acc = NULL;
    #pragma omp parallel
    {
        #pragma omp single
        {
            /* One slot per thread. The implicit barrier at the end of
               'single' makes local_acc visible to all threads before use. */
            local_acc = (unsigned long *) malloc(sizeof(unsigned long) * omp_get_num_threads());
            /* BUGFIX: malloc was previously unchecked. */
            if (local_acc == NULL) {
                fprintf(stderr, "malloc de local_acc falhou\n");
                exit(EXIT_FAILURE);
            }
        }
        /* Work partitioning: contiguous chunk per thread; the last thread
           absorbs the remainder when size is not divisible by the count. */
        int local_init, local_end, chunk;
        chunk = size / omp_get_num_threads();
        local_init = omp_get_thread_num() * chunk;
        local_end = (omp_get_thread_num()+1) * chunk;
        if ((omp_get_num_threads()-1) == omp_get_thread_num()) local_end = size;
        local_acc[omp_get_thread_num()] = 0;
        /* Race-free sum: each thread writes only its own slot. */
        for (int i = local_init; i < local_end; i++) {
            local_acc[omp_get_thread_num()] += vetor[i];
        }
        /* Ensure every partial sum is complete before reducing. */
        #pragma omp barrier
        /* Reduce the per-thread partials to a single value. */
        #pragma omp single
        {
            for (int i = 0; i < omp_get_num_threads(); i++) {
                acc += local_acc[i];
            }
        }
    }
    printf("Resultado: %lu\n",acc);
    /* BUGFIX: both heap buffers were previously leaked. */
    free(local_acc);
    free(vetor);
    return 0;
}
|
GB_unop__ainv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_int8_int8
// op(A') function: GB_unop_tran__ainv_int8_int8
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) unary op elementwise:
// Cx [p] = -(Ax [p]) for p in [0, anz), parallelized with a static
// schedule over nthreads. Cx and Ax may alias (pure elementwise write).
// Auto-generated file: code kept byte-identical; comments only.
GrB_Info GB_unop_apply__ainv_int8_int8
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// Operator compiled out via GxB_NO_AINV / GxB_NO_INT8; caller falls
// back to the generic implementation.
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// aij = Ax [p]; cast is the identity (int8 -> int8); then z = -aij.
// NOTE(review): -INT8_MIN does not fit in int8_t; the stored result
// wraps to INT8_MIN on the usual two's-complement conversion -- confirm
// this matches GraphBLAS's intended modular semantics.
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = -z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, then apply AINV elementwise.
// The actual loop body lives in the shared template GB_unop_transpose.c,
// instantiated here (phase 2 of 2) with the GB_* macros defined above.
// Auto-generated file: code kept byte-identical; comments only.
GrB_Info GB_unop_tran__ainv_int8_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// Operator compiled out via GxB_NO_AINV / GxB_NO_INT8.
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
critical.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Demo of '#pragma omp critical': each thread sums its statically
 * scheduled share of a[0..n-1] into a private accumulator, then adds
 * it to the shared total inside a critical section.
 * Usage: program <iterations>   (n is capped at 20)
 */
int main(int argc, char **argv){  /* BUGFIX: implicit-int main is invalid since C99 */
    int i, n = 20, suma = 0, sumalocal;
    /* Fixed-size array instead of the old VLA 'a[n]': n is capped at 20
       below, so 20 elements always suffice and the size is compile-time. */
    int a[20];
    if (argc < 2) {
        fprintf(stderr,"\nFalta iteraciones\n"); exit(-1);
    }
    n = atoi(argv[1]); if (n > 20) n = 20;
    for (i = 0; i < n; i++) a[i] = i;
    #pragma omp parallel private(sumalocal)
    {
        sumalocal = 0;
        /* The loop variable of an 'omp for' is implicitly private. */
        #pragma omp for schedule(static)
        for (i = 0; i < n; i++)
        {
            sumalocal += a[i];
            printf("thread %d suma de a[%d]=%d sumalocal=%d\n", omp_get_thread_num(),i,a[i],sumalocal);
        }
        /* Serialize the updates to the shared total. */
        #pragma omp critical
        suma = suma + sumalocal;
    }
    printf("Fuera de 'parallel' suma=%d\n",suma); return(0);
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  FrameInfo
    frame;

  Image
    *canvas,
    *framed;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    Describe a flat (bevel-free) frame: the canvas grows by the border
    size on each of the four sides, with the image offset to the border
    origin.
  */
  frame.width=image->columns+(border_info->width << 1);
  frame.height=image->rows+(border_info->height << 1);
  frame.x=(ssize_t) border_info->width;
  frame.y=(ssize_t) border_info->height;
  frame.inner_bevel=0;
  frame.outer_bevel=0;
  /*
    Clone the source so the frame can be painted in the border color
    without touching the caller's image.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  canvas->alpha_color=image->border_color;
  framed=FrameImage(canvas,&frame,compose,exception);
  canvas=DestroyImage(canvas);
  if (framed != (Image *) NULL)
    framed->alpha_color=image->alpha_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the alpha_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
accentuate,
highlight,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
/* NOTE(review): non-short-circuit bitwise | below; both operands are 0/1
comparison results, so the outcome matches || -- confirm intentional. */
if ((x < (ssize_t) image->columns) | (y < (ssize_t) image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
{
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
(IsGrayColorspace(frame_image->colorspace) != MagickFalse))
(void) SetImageColorspace(frame_image,sRGBColorspace,exception);
if ((frame_image->alpha_color.alpha_trait != UndefinedPixelTrait) &&
(frame_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
matte=image->alpha_color;
accentuate=matte;
accentuate.red=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.black=(double) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
accentuate.alpha=matte.alpha;
highlight=matte;
highlight.red=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.black=(double) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
highlight.alpha=matte.alpha;
shadow=matte;
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.black=QuantumScale*matte.black*ShadowModulate;
shadow.alpha=matte.alpha;
trough=matte;
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.black=QuantumScale*matte.black*TroughModulate;
trough.alpha=matte.alpha;
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
frame_view=AcquireAuthenticCacheView(frame_image,exception);
/* Rows in the top border band: outer bevel + top margin + inner bevel. */
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
if (q != (Quantum *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelViaPixelInfo(frame_image,&highlight,q);
else
SetPixelViaPixelInfo(frame_image,&accentuate,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelViaPixelInfo(frame_image,&shadow,q);
else
SetPixelViaPixelInfo(frame_image,&trough,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,frame_image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
size_t
width;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
/*
Set frame interior pixels.
*/
{
register const Quantum
*p;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
SetPixelBackgoundColor(frame_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(frame_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait frame_traits=GetPixelChannelTraits(frame_image,channel);
if ((traits == UndefinedPixelTrait) ||
(frame_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(frame_image,channel,p[i],q);
}
SetPixelRed(frame_image,GetPixelRed(image,p),q);
SetPixelGreen(frame_image,GetPixelGreen(image,p),q);
SetPixelBlue(frame_image,GetPixelBlue(image,p),q);
SetPixelAlpha(frame_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(frame_image);
}
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FrameImage)
#endif
proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
/* Rows in the bottom border band: inner bevel + bottom margin + outer
bevel. */
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (Quantum *) NULL)
{
/*
Draw bottom of ornamental border.
*/
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < y; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelViaPixelInfo(frame_image,&highlight,q);
else
SetPixelViaPixelInfo(frame_image,&accentuate,q);
q+=GetPixelChannels(frame_image);
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelViaPixelInfo(frame_image,&matte,q);
q+=GetPixelChannels(frame_image);
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelViaPixelInfo(frame_image,&shadow,q);
q+=GetPixelChannels(frame_image);
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelViaPixelInfo(frame_image,&highlight,q);
q+=GetPixelChannels(frame_image);
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelViaPixelInfo(frame_image,&shadow,q);
else
SetPixelViaPixelInfo(frame_image,&trough,q);
q+=GetPixelChannels(frame_image);
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
/* Composite the source image into the frame interior. */
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (status != MagickFalse)
status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
exception);
if (status == MagickFalse)
frame_image=DestroyImage(frame_image);
return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
const RectangleInfo *raise_info,const MagickBooleanType raise,
ExceptionInfo *exception)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
Quantum
foreground,
background;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(raise_info != (RectangleInfo *) NULL);
if ((image->columns <= (raise_info->width << 1)) ||
(image->rows <= (raise_info->height << 1)))
ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
image->filename);
/* raise: blend edges toward white (foreground) on top/left and black
(background) on bottom/right; lowered effect swaps the two. */
foreground=QuantumRange;
background=(Quantum) 0;
if (raise == MagickFalse)
{
foreground=(Quantum) 0;
background=QuantumRange;
}
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Raise image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,1,1)
#endif
/* Top band: highlight the left bevel, accentuate the top edge, shadow the
right bevel. */
for (y=0; y < (ssize_t) raise_info->height; y++)
{
register ssize_t
i,
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < y; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
foreground*(QuantumRange-HighlightFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) (image->columns-y); x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
(double) foreground*(QuantumRange-AccentuateFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
background*(QuantumRange-ShadowFactor)));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,1,1)
#endif
/* Middle band: lighten the left column, skip the untouched interior,
darken the right column. */
for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
{
register ssize_t
i,
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) raise_info->width; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
foreground*(QuantumRange-HighlightFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
q+=GetPixelChannels(image);
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
background*(QuantumRange-ShadowFactor)));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,1,1)
#endif
/* Bottom band: highlight the left bevel, trough the bottom edge, shadow
the right bevel. */
for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
{
register ssize_t
i,
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->rows-y); x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
foreground*(QuantumRange-HighlightFactor)));
}
q+=GetPixelChannels(image);
}
/* NOTE(review): unlike every other bevel loop, this trough loop skips the
GetPixelReadMask test -- confirm whether that is intentional. */
for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
(double) background*(QuantumRange-TroughFactor)));
}
q+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
background*(QuantumRange-ShadowFactor)));
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RaiseImage)
#endif
proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
main.c | #include "common.h"
/* Print usage text for the program and terminate via the END() macro.
   argv is the program name (argv[0] from main). */
static void print_help(char *argv)
{
END("%s [-f edge_file] [-W width] [-H height] [-D degree] [-R length] [-o output_file] [-s random_seed]\
[-n calculations] [-w max_temperature] [-c min_temperature] [-g groups] [-C cooling_cycle] [-B] [-d]\
[-F fixed_temperature] [-Y] [-M] [-h]\n", argv);
}
/* Parse command-line options with getopt() and store each validated value
   through the caller-supplied pointers.  Every numeric option is range
   checked and rejected with ERROR() on violation; unknown options and -h
   fall through to print_help(), which does not return. */
static void set_args(const int argc, char **argv, char *infname, int *low_length, char *outfname,
int *random_seed, long long *ncalcs, double *max_temp, double *min_temp, int *groups,
int *cooling_cycle, bool *enable_hill_climbing, bool *enable_detect_temp, bool *enable_bfs,
bool *enable_halfway, double *fixed_temp, int *width, int *height, int *degree)
{
if(argc < 3)
print_help(argv[0]);
int result;
while((result = getopt(argc,argv,"f:W:H:D:R:o:s:n:w:c:g:C:BdF:YMh"))!=-1){
switch(result){
case 'f':
if(strlen(optarg) > MAX_FILENAME_LENGTH)
ERROR("Input filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg);
strcpy(infname, optarg);
break;
case 'W':
*width = atoi(optarg);
if(*width <= 0)
ERROR("-W value > 0\n");
break;
case 'H':
*height = atoi(optarg);
if(*height <= 0)
ERROR("-H value > 0\n");
break;
case 'D':
*degree = atoi(optarg);
if(*degree <= 0)
ERROR("-D value > 0\n");
break;
case 'R':
*low_length = atoi(optarg);
if(*low_length <= 0)
ERROR("-R value > 0\n");
break;
case 'o':
if(strlen(optarg) > MAX_FILENAME_LENGTH)
ERROR("Output filename is long (%s). Please change MAX_FILENAME_LENGTH.\n", optarg);
strcpy(outfname, optarg);
break;
case 's':
*random_seed = atoi(optarg);
if(*random_seed < 0)
ERROR("-s value >= 0\n");
break;
case 'n':
*ncalcs = atoll(optarg);
if(*ncalcs < 0)
ERROR("-n value >= 0\n");
break;
case 'w':
*max_temp = atof(optarg);
if(*max_temp <= 0)
ERROR("-w value > 0\n");
break;
case 'c':
*min_temp = atof(optarg);
if(*min_temp <= 0)
ERROR("-c value > 0\n");
break;
case 'g':
*groups = atoi(optarg);
if(*groups != 1 && *groups != 2 && *groups != 4)
ERROR("-g value == 1 or 2 or 4\n");
break;
case 'C':
*cooling_cycle = atoi(optarg);
if(*cooling_cycle <= 0)
ERROR("-C value > 0\n");
break;
case 'B':
*enable_bfs = true;
break;
case 'd':
*enable_detect_temp = true;
break;
case 'F':
*fixed_temp = atof(optarg);
if(*fixed_temp <= 0)
ERROR("-F value > 0\n");
break;
case 'Y':
*enable_hill_climbing = true;
break;
case 'M':
*enable_halfway = true;
break;
case 'h':
default:
print_help(argv[0]);
}
}
}
/* Return how many edges in the list are self-loops, i.e. have both
   endpoints on the same vertex.  edge[] holds `lines` pairs. */
static int count_loop(const int lines, const int *edge)
{
  int loops = 0;

  for (int i = 0; i < lines; i++) {
    const int v = edge[2 * i];
    const int w = edge[2 * i + 1];
    if (v == w)
      loops++;
  }
  return loops;
}
/* True when vertices v and w lie within the allowed wiring length on the
   lattice (distance measured by the DISTANCE macro). */
static bool confirm_dist(const int v, const int w, const int height, const int low_length)
{
  const int d = DISTANCE(v, w, height);
  return d <= low_length;
}
/* Pick two distinct edges at random and swap their endpoints, accepting
   the rewiring only if both new edges still satisfy the length limit.
   Retries until exactly one legal exchange has been applied. */
static void simple_exchange_edge(const int height, const int low_length, const int lines, int* edge)
{
  for (;;) {
    int a, b;
    do {
      a = random() % lines;
      b = random() % lines;
    } while (a == b);

    const int av = edge[a*2], aw = edge[a*2+1];
    const int bv = edge[b*2], bw = edge[b*2+1];
    int nav, naw, nbv, nbw;

    if (confirm_dist(av, bv, height, low_length) &&
        confirm_dist(aw, bw, height, low_length)) {
      /* Pair the two "v" endpoints and the two "w" endpoints. */
      nav = av; naw = bv;
      nbv = aw; nbw = bw;
    }
    else if (confirm_dist(av, bw, height, low_length) &&
             confirm_dist(aw, bv, height, low_length)) {
      /* Cross pairing. */
      nav = av; naw = bw;
      nbv = aw; nbw = bv;
    }
    else {
      continue;  /* neither rewiring is legal; draw again */
    }

    edge[2*a] = nav; edge[2*a+1] = naw;
    edge[2*b] = nbv; edge[2*b+1] = nbw;
    return;
  }
}
#ifdef _OPENMP
/* One BFS expansion step (OpenMP variant): visit every neighbor of the
   current frontier and collect newly reached vertices into next[].
   Returns the size of the new frontier.
   NOTE(review): bitmap[] is tested and written by multiple threads without
   atomics, so under contention the same vertex may be appended to next[]
   more than once -- appears tolerated by the caller, but confirm.
   NOTE(review): local_frontier is a per-thread VLA of `nodes` ints; very
   large graphs may overflow thread stacks. */
static int top_down_step(const int nodes, const int num_frontier, const int degree,
const int* restrict adjacency, int* restrict frontier,
int* restrict next, char* restrict bitmap)
{
int count = 0;
int local_frontier[nodes];
#pragma omp parallel private(local_frontier)
{
int local_count = 0;
#pragma omp for nowait
for(int i=0;i<num_frontier;i++){
int v = frontier[i];
for(int j=0;j<degree;j++){
int n = *(adjacency + v * degree + j); // adjacency[v][j];
if(bitmap[n] == NOT_VISITED){
bitmap[n] = VISITED;
local_frontier[local_count++] = n;
}
}
} // end for i
/* Serialize the append of each thread's local discoveries into next[]. */
#pragma omp critical
{
memcpy(&next[count], local_frontier, local_count*sizeof(int));
count += local_count;
}
}
return count;
}
#else
/* One BFS expansion step (serial variant): scan each frontier vertex's
   adjacency row and append every unvisited neighbor to next[].  Returns
   the number of vertices in the new frontier. */
static int top_down_step(const int nodes, const int num_frontier, const int degree,
                         const int* restrict adjacency, int* restrict frontier,
                         int* restrict next, char* restrict bitmap)
{
  int found = 0;

  for (int i = 0; i < num_frontier; i++) {
    const int *row = adjacency + frontier[i] * degree;  /* adjacency[v] */
    for (int j = 0; j < degree; j++) {
      const int n = row[j];
      if (bitmap[n] != NOT_VISITED)
        continue;
      bitmap[n] = VISITED;
      next[found++] = n;
    }
  }
  return found;
}
#endif
/* Breadth-first search from vertex 0; returns the number of vertices left
   unreached (0 means the graph is connected). */
static int simple_bfs(const int nodes, const int degree, int *adjacency)
{
  char *visited  = malloc(sizeof(char) * nodes);
  int  *frontier = malloc(sizeof(int) * nodes);
  int  *next     = malloc(sizeof(int) * nodes);

  for (int i = 0; i < nodes; i++)
    visited[i] = NOT_VISITED;

  /* Seed the search at vertex 0. */
  frontier[0] = 0;
  visited[0] = VISITED;

  int frontier_size = 1;
  for (;;) {
    frontier_size = top_down_step(nodes, frontier_size, degree,
                                  adjacency, frontier, next, visited);
    if (frontier_size == 0)
      break;
    /* Swap the frontier buffers for the next level. */
    int *swap = frontier;
    frontier = next;
    next = swap;
  }

  int unreached = 0;
  for (int i = 0; i < nodes; i++)
    if (visited[i] == NOT_VISITED)
      unreached++;

  free(visited);
  free(frontier);
  free(next);
  return unreached;
}
// Inherited from http://research.nii.ac.jp/graphgolf/c/create-lattice.c
/* Build an initial regular lattice graph of `nodes` vertices / `lines`
   edges, then randomly rewire it until it is connected, loop-free and
   free of duplicate edges, always respecting the wiring-length limit. */
static void create_lattice(const int nodes, const int lines, const int width, const int height,
const int degree, const int low_length, int edge[lines*2])
{
int i = 0;
/* Pair columns 2x and 2x+1: each node gets `degree` parallel edges to its
horizontal neighbor. */
for(int x=0;x<width/2;x++){
for(int y=0;y<height;y++){
for(int k=0;k<degree;k++){
edge[i*2] = y + 2 * x * height;
edge[i*2+1] = edge[2*i] + height;
i++;
}
}
}
/* Odd width: pair up nodes of the last column vertically. */
if(width%2 == 1){
for(int y=0;y<height/2;y++){
for(int k=0;k<degree;k++){
edge[i*2] = (width - 1) * height + 2 * y;
edge[i*2+1] = edge[i*2] + 1;
i++;
}
}
/* add self-loop */
if(height%2 == 1){
for(int k=0;k<degree/2;k++){
edge[i*2] = edge[i*2+1] = nodes - 1;
i++;
}
}
}
for(int i=0;i<lines*INITIAL_TIMES;i++) // Give randomness
simple_exchange_edge(height, low_length, lines, edge);
int (*adjacency)[degree] = malloc(sizeof(int)*nodes*degree); // int adjacency[nodes][degree];
create_adjacency(nodes, lines, degree, (const int (*)[2])edge, adjacency);
int min_num = simple_bfs(nodes, degree, (int *)adjacency);
int *tmp_edge = malloc(lines*2*sizeof(int));
/* Greedily rewire, keeping any exchange that does not increase the number
of BFS-unreachable vertices, until the graph is connected. */
while(1){
memcpy(tmp_edge, edge, sizeof(int)*lines*2);
simple_exchange_edge(height, low_length, lines, tmp_edge);
create_adjacency(nodes, lines, degree, (const int (*)[2])tmp_edge, adjacency);
int tmp_num = simple_bfs(nodes, degree, (int *)adjacency);
if(tmp_num == 0){
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
break;
}
else{
if(tmp_num <= min_num){
min_num = tmp_num;
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
}
}
}
// Remove loops
min_num = count_loop(lines, edge);
if(min_num != 0){
while(1){
memcpy(tmp_edge, edge, sizeof(int)*lines*2);
simple_exchange_edge(height, low_length, lines, tmp_edge);
int tmp_num = count_loop(lines, tmp_edge);
if(tmp_num == 0){
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
break;
}
else{
if(tmp_num <= min_num){
min_num = tmp_num;
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
}
}
}
}
// Remove duplicated edges
min_num = count_duplicate_all_edge(lines, (const int (*)[2])edge);
if(min_num != 0){
while(1){
memcpy(tmp_edge, edge, sizeof(int)*lines*2);
simple_exchange_edge(height, low_length, lines, tmp_edge);
int tmp_num = count_duplicate_all_edge(lines, (const int (*)[2])tmp_edge);
if(tmp_num == 0){
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
break;
}
else{
if(tmp_num <= min_num){
min_num = tmp_num;
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
}
}
}
}
free(tmp_edge);
free(adjacency);
// for(int i=0;i<lines;i++)
// printf("%d,%d %d,%d\n", WIDTH(edge[i*2], height), HEIGHT(edge[i*2], height),
// WIDTH(edge[i*2+1], height), HEIGHT(edge[i*2+1], height));
//EXIT(0);
}
/* Count the newline characters in the named file; aborts via ERROR() when
   the file cannot be opened. */
static int count_lines(const char *fname)
{
  FILE *fp = fopen(fname, "r");
  if (fp == NULL)
    ERROR("File not found\n");

  int count = 0;
  for (int c = fgetc(fp); c != EOF; c = fgetc(fp)) {
    if (c == '\n')
      count++;
  }

  fclose(fp);
  return count;
}
/* Load an edge list of "x1,y1 x2,y2" lines from fname.  First pass derives
   the lattice width/height from the maximum coordinates; second pass
   converts every coordinate pair to a linear vertex id (x * height + y)
   and stores consecutive id pairs in edge[]. */
static void read_file_lattice(int *edge, int *w, int *h, const char *fname)
{
  FILE *fp = fopen(fname, "r");
  if (fp == NULL) {
    PRINT_R0("File not found\n");
    EXIT(1);
  }

  int c[4];
  *w = 0;
  *h = 0;
  /* Pass 1: find the largest x (width) and y (height) coordinates. */
  while (fscanf(fp, "%d,%d %d,%d", &c[0], &c[1], &c[2], &c[3]) != EOF) {
    *w = MAX(*w, c[0]);
    *h = MAX(*h, c[1]);
    *w = MAX(*w, c[2]);
    *h = MAX(*h, c[3]);
  }
  *w += 1;
  *h += 1;

  /* Pass 2: linearize each endpoint. */
  rewind(fp);
  int line = 0;
  while (fscanf(fp, "%d,%d %d,%d", &c[0], &c[1], &c[2], &c[3]) != EOF) {
    edge[line*2  ] = c[0] * (*h) + c[1];
    edge[line*2+1] = c[2] * (*h) + c[3];
    line++;
  }
  fclose(fp);
}
/* Return the largest vertex id appearing anywhere in the edge list. */
static int max_node_num(const int lines, const int edge[lines*2])
{
  int best = edge[0];

  for (int i = 1; i < lines * 2; i++) {
    if (edge[i] > best)
      best = edge[i];
  }
  return best;
}
/* Expand the base edge list (first based_lines entries, coordinates in a
   based_height lattice) into a rotationally symmetric edge list over the
   full height x width lattice, then rewire randomly until the graph is
   connected.
   NOTE(review): based_nodes is currently unused. */
static void create_symmetric_edge(int *edge, const int based_nodes, const int based_lines,
const int groups, const int degree, const int nodes, const int lines,
const int height, const int width, const int based_height,
const int low_length)
{
/* Re-linearize the base endpoints from based_height to the full height. */
for(int i=0;i<based_lines;i++)
for(int j=0;j<2;j++)
edge[i*2+j] = WIDTH(edge[i*2+j], based_height) * height + HEIGHT(edge[i*2+j], based_height);
/* groups==2: append a copy of the base edges rotated 180 degrees. */
if(groups == 2){
for(int i=0;i<based_lines;i++)
for(int j=0;j<2;j++)
edge[(based_lines+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180);
}
/* groups==4: append copies rotated 90, 180 and 270 degrees. */
else if(groups == 4){
for(int i=0;i<based_lines;i++){
for(int j=0;j<2;j++){
edge[(based_lines +i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 90);
edge[(based_lines*2+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 180);
edge[(based_lines*3+i)*2+j] = ROTATE(edge[i*2+j], height, width, groups, 270);
}
}
}
int *tmp_edge = malloc(lines*2*sizeof(int));
int (*adjacency)[degree] = malloc(sizeof(int)*nodes*degree); // int adjacency[nodes][degree];
for(int i=0;i<lines*INITIAL_TIMES;i++) // Give randomness
exchange_edge(nodes, lines, degree, (int (*)[2])edge, height, width, groups, low_length, 0);
create_adjacency(nodes, lines, degree, (const int (*)[2])edge, adjacency);
int min_num = simple_bfs(nodes, degree, (int *)adjacency);
/* Greedily rewire, keeping any exchange that does not increase the number
of BFS-unreachable vertices, until the graph is connected. */
if(min_num != 0){
while(1){
memcpy(tmp_edge, edge, sizeof(int)*lines*2);
exchange_edge(nodes, lines, degree, (int (*)[2])tmp_edge, height, width, groups, low_length, 0);
create_adjacency(nodes, lines, degree, (const int (*)[2])tmp_edge, adjacency);
int tmp_num = simple_bfs(nodes, degree, (int *)adjacency);
if(tmp_num == 0){
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
break;
}
else{
if(tmp_num <= min_num){
min_num = tmp_num;
memcpy(edge, tmp_edge, sizeof(int)*lines*2);
}
}
}
}
free(tmp_edge);
free(adjacency);
}
/* Verify that edge[] describes a regular graph (every vertex has degree
   2*lines/nodes) and that no edge exceeds the allowed wiring length.
   Aborts via ERROR() on the first violation.
   Fix: the per-vertex degree counters were a VLA (`int n[nodes]`), which
   can overflow the stack for large graphs; they are now heap-allocated
   and zero-initialized with calloc (which also removes the manual
   zeroing loop). */
static void verfy_graph(const int nodes, const int lines, const int edge[lines*2],
const int height, const int low_length)
{
  PRINT_R0("Verifing a regular graph... ");
  int *n = calloc(nodes, sizeof(int));
  if(n == NULL)
    ERROR("calloc failed\n");
  for(int i=0;i<lines;i++){
    n[edge[i*2  ]]++;
    n[edge[i*2+1]]++;
  }
  int degree = 2 * lines / nodes;
  for(int i=0;i<nodes;i++)
    if(degree != n[i])
      ERROR("NG\nNot regular graph. degree = %d n[%d] = %d.\n", degree, i, n[i]);
  free(n);
  for(int i=0;i<lines;i++){
    if(DISTANCE(edge[i*2], edge[i*2+1], height) > low_length)
      ERROR("Over length in line %d: length = %d, distance = %d\n",
            i+1, low_length, DISTANCE(edge[i*2], edge[i*2+1], height));
  }
  PRINT_R0("OK\n");
}
/* Manhattan distance between grid points (x1,y1) and (x2,y2). */
static int dist(const int x1, const int y1, const int x2, const int y2)
{
  int dx = x1 - x2;
  int dy = y1 - y2;
  if (dx < 0) dx = -dx;
  if (dy < 0) dy = -dy;
  return dx + dy;
}
/* Compute lower bounds on the diameter (*low_diam) and average shortest
   path length (*low_ASPL) of an m x n grid graph of the given degree,
   where one hop can cover up to `length` grid distance.  Combines the
   Moore bound with each cell's reachable-cell histogram. */
static void lower_bound_of_diam_aspl(int *low_diam, double *low_ASPL, const int m, const int n,
const int degree, const int length)
{
int moore[m*n], hist[m*n], mh[m*n];
int mn = m * n, current = degree, ii;
double sum = 0;
/* moore[k]: Moore bound on the number of vertices reachable within k hops
for a degree-regular graph, capped at the grid size mn. */
moore[0] = 1;
moore[1] = degree + 1;
for(ii=2;;ii++){
current = current * (degree - 1);
moore[ii] = moore[ii-1] + current;
if(moore[ii] >= mn){
moore[ii] = mn;
break;
}
}
/* Hops needed to span the grid corner-to-corner with steps of `length`. */
int maxhop = MAX((m+n-2+(length-1))/length, ii);
for(int i=ii+1;i<=maxhop;i++)
moore[i] = mn;
/* For each source cell, build its cumulative distance histogram and cap
it by the Moore bound; accumulate the bounded hop counts into sum. */
for(int i=0;i<m;i++){
for(int j=0;j<n;j++){
for(int k=0;k<=maxhop;k++)
hist[k] = 0;
for (int i2=0;i2<m;i2++)
for(int j2=0;j2<n;j2++)
hist[(dist(i,j,i2,j2)+length-1)/length]++;
for(int k=1;k<=maxhop;k++)
hist[k] += hist[k-1];
for(int k=0;k<=maxhop;k++)
mh[k] = MIN(hist[k], moore[k]);
for(int k=1;k<=maxhop;k++)
sum += (double)(mh[k] - mh[k-1]) * k;
}
}
/* Diameter bound: first hop count at which every vertex is reachable. */
int dboth = 0;
for(dboth=0;;dboth++)
if(mh[dboth] == mn)
break;
*low_diam = dboth;
*low_ASPL = sum/((double)mn*(mn-1));
}
// Print the run configuration (seed, parallelism, APSP method, algorithm and
// temperature schedule, file names, and graph parameters) on rank 0 before
// the optimization starts.
// Fixes: output-string typos "Opetation" -> "Operation" and
// "Calulations" -> "Calculations".
static void output_params(const int degree, const int groups, const int low_length, const int random_seed,
                          const double max_temp, const double min_temp, const long long ncalcs,
                          const int cooling_cycle, const double cooling_rate, const char *infname,
                          const char *outfname, const double average_time, const bool enable_hill_climbing,
                          const int width, const int height, const bool enable_bfs, const bool enable_fixed_temp,
                          const double fixed_temp)
{
#ifdef NDEBUG
  PRINT_R0("NO DEBUG MODE\n");
#else
  PRINT_R0("DEBUG MODE\n");
#endif
  PRINT_R0("Seed : %d\n", random_seed);
  PRINT_R0("Processes: %d\n", procs);
#ifdef _OPENMP
  PRINT_R0("Threads  : %d\n", omp_get_max_threads());
#endif
  if(enable_bfs) PRINT_R0("APSP : BFS\n");
  else           PRINT_R0("APSP : MATRIX Operation\n");
  if(enable_hill_climbing)
    PRINT_R0("Algorithm: Hill climbing Method\n");
  else{
    if(enable_fixed_temp)
      PRINT_R0("Algorithm: Fixed Temperature Simulated Annealing : %f\n", fixed_temp);
    else
      PRINT_R0("Algorithm: Simulated Annealing\n");
    PRINT_R0("   MAX Temperature: %f\n", max_temp);
    PRINT_R0("   MIN Temperature: %f\n", min_temp);
    PRINT_R0("   Cooling Cycle: %d\n", cooling_cycle);
    PRINT_R0("   Cooling Rate : %f\n", cooling_rate);
  }
  if(groups != 1)
    PRINT_R0("   Groups : %d\n", groups);
  PRINT_R0("Num. of Calculations: %lld\n", ncalcs);
  PRINT_R0("   Average APSP time    : %f sec.\n", average_time);
  PRINT_R0("   Estimated elapse time: %f sec.\n", average_time * ncalcs);
  if(infname[0] != NOT_C_DEFINED)
    PRINT_R0("Input filename: %s\n", infname);
  PRINT_R0("   (w x h, d, r) = (%d x %d, %d, %d)\n", width, height, degree, low_length);
  if(outfname[0] != NOT_C_DEFINED)
    PRINT_R0("Output filename: %s\n", outfname);
  PRINT_R0("---\n");
}
// Write the edge list to fp, one edge per line, as "x1,y1 x2,y2" grid
// coordinates (WIDTH/HEIGHT are project macros decoding a vertex index).
static void output_file(FILE *fp, const int lines, const int height, const int edge[lines*2])
{
  for(int i=0;i<lines;i++){
    const int u = edge[i*2  ];
    const int v = edge[i*2+1];
    fprintf(fp, "%d,%d %d,%d\n", WIDTH(u, height), HEIGHT(u, height),
            WIDTH(v, height), HEIGHT(v, height));
  }
}
// Entry point: parse options, build or load the initial lattice graph,
// verify it, run hill climbing / simulated annealing, and report results.
// Fixes: (1) groups==4 parity check used && so it only fired when BOTH
// dimensions were odd — the message and later consistency checks require
// both to be even, so it must be ||; (2) message typos "os divisible" and
// "exsits"; (3) restored "&degree" (was mangled to a degree sign by an
// HTML-entity extraction of "&deg").
int main(int argc, char *argv[])
{
  bool enable_hill_climbing = false, enable_detect_temp = false, enable_bfs = false, enable_halfway = false;
  char hostname[MPI_MAX_PROCESSOR_NAME];
  char infname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED}, outfname[MAX_FILENAME_LENGTH] = {NOT_C_DEFINED};
  int random_seed = 0, cooling_cycle = 1, groups = 1;
  int namelen, based_lines, lines, based_width, based_height, based_nodes, nodes, *edge;
  int diam = NOT_N_DEFINED, degree = NOT_N_DEFINED, low_diam = NOT_N_DEFINED;
  int width = NOT_N_DEFINED, height = NOT_N_DEFINED, low_length = NOT_N_DEFINED;
  long long ncalcs = DEFAULT_NCALCS, num_accepts = 0;
  double ASPL = NOT_N_DEFINED, low_ASPL = NOT_N_DEFINED, cooling_rate = NOT_N_DEFINED, max_diff_energy = NOT_N_DEFINED;
  double max_temp = NOT_N_DEFINED, min_temp = NOT_N_DEFINED, fixed_temp = NOT_N_DEFINED;
  FILE *fp = NULL;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &procs);
  MPI_Get_processor_name(hostname, &namelen);
  PRINT_R0("Run on %s\n", hostname);
  time_t t = time(NULL);
  PRINT_R0("%s---\n", ctime(&t));

  // Set arguments
  set_args(argc, argv, infname, &low_length, outfname, &random_seed, &ncalcs, &max_temp,
	   &min_temp, &groups, &cooling_cycle, &enable_hill_climbing, &enable_detect_temp,
	   &enable_bfs, &enable_halfway, &fixed_temp, &width, &height, &degree);

  // Set other arguments
  bool enable_max_temp   = (max_temp   != NOT_N_DEFINED);
  bool enable_min_temp   = (min_temp   != NOT_N_DEFINED);
  bool enable_fixed_temp = (fixed_temp != NOT_N_DEFINED);
  bool enable_infname    = (infname[0] != NOT_C_DEFINED);
  bool enable_outfname   = (outfname[0] != NOT_C_DEFINED);
  bool enable_whd        = (width != NOT_N_DEFINED && height != NOT_N_DEFINED && degree != NOT_N_DEFINED);

  // Check arguments
  if(low_length == NOT_N_DEFINED) ERROR("Must need -R\n");
  else if(enable_hill_climbing && enable_max_temp)    ERROR("Both -Y and -w cannot be used.\n");
  else if(enable_hill_climbing && enable_min_temp)    ERROR("Both -Y and -c cannot be used.\n");
  else if(enable_hill_climbing && enable_detect_temp) ERROR("Both -Y and -d cannot be used.\n");
  else if(!enable_infname && !enable_whd)             ERROR("Must set -f or \"-W and -H and -D\"\n");
  else if(enable_halfway && !enable_infname)          ERROR("Must set both -M and -f\n");

  if(!enable_max_temp) max_temp = 100.0;
  if(!enable_min_temp) min_temp = 0.217147;
  if(max_temp == min_temp) ERROR("The same values in -w and -c.\n");
  if(enable_detect_temp) ncalcs = DEFAULT_DETECT_NCALS;
  srandom(random_seed);

  // Build the graph geometry either from an input file or from -W/-H/-D.
  if(enable_infname){
    based_lines = count_lines(infname);
    lines = (enable_halfway)? based_lines : based_lines * groups;
    edge = malloc(sizeof(int)*lines*2); // int edge[lines][2];
    read_file_lattice(edge, &based_width, &based_height, infname);
    based_nodes = max_node_num(based_lines, (int *)edge) + 1;
    if(enable_halfway){
      // The input already contains the full symmetric graph; derive the base.
      based_nodes /= groups;
      based_lines /= groups;
      if(groups == 2){
	based_height /= 2;
      }
      else if(groups == 4){
	based_width  /= 2;
	based_height /= 2;
      }
    }
    if(groups == 1){
      height = based_height;
      width  = based_width;
    }
    else if(groups == 2){
      height = based_height * 2;
      width  = based_width;
    }
    else{ // groups == 4
      height = based_height * 2;
      width  = based_width  * 2;
    }
    nodes  = based_nodes * groups;
    degree = 2 * lines / nodes;
  }
  else{
    nodes       = width * height;
    based_nodes = nodes / groups;
    lines       = nodes * degree / 2;
    based_lines = lines / groups;
    edge = malloc(sizeof(int)*lines*2); // int edge[lines][2];
    if(groups == 1){
      based_width  = width;
      based_height = height;
    }
    else if(groups == 2){
      based_width  = width;
      based_height = height/2;
    }
    else{ // groups == 4
      based_width  = width/2;
      based_height = height/2;
    }
  }

  // Consistency checks between the group symmetry and the grid dimensions.
  if(groups == 4 && (based_width != based_height))
    ERROR("When g = 4, width(%d) must be equal to height(%d).\n", based_width, based_height);
  else if(groups == 4 && (width%2 != 0 || height%2 != 0)) // BUGFIX: was &&; either odd dimension is invalid
    ERROR("When g = 4, width(%d) and height(%d) are divisible by 2.\n", width, height);
  else if(groups == 2 && height%2 != 0)
    ERROR("When g = 2, height(%d) is divisible by 2.\n", height);
  else if(nodes%groups != 0)
    ERROR("nodes(%d) must be divisible by groups(%d)\n", nodes, groups);
  else if(lines%groups != 0)
    ERROR("(nodes*degree/2) must be divisible by groups(%d)\n", groups);
  else if(based_width*based_height != based_nodes)
    ERROR("Not grid graph (width %d x height %d != nodes %d).\n", based_width, based_height, based_nodes);

  if(!enable_infname)
    create_lattice(based_nodes, based_lines, based_width, based_height, degree, low_length, edge);

  int *rotate_hash = malloc(nodes * sizeof(int));
  create_rotate_hash(nodes, height, width, groups, rotate_hash);

  if(!enable_halfway && groups != 1)
    create_symmetric_edge(edge, based_nodes, based_lines, groups, degree, nodes,
			  lines, height, width, based_height, low_length);
  verfy_graph(nodes, lines, edge, height, low_length);

  lower_bound_of_diam_aspl(&low_diam, &low_ASPL, width, height, degree, low_length);
  check_current_edge(nodes, lines, edge, low_ASPL, low_diam, groups, height, based_height, enable_bfs, rotate_hash);
  double average_time = estimated_elapse_time(nodes, lines, edge, height, width, based_height, groups,
					      low_length, enable_bfs, rotate_hash);
  if(enable_hill_climbing){
    fixed_temp = max_temp = min_temp = 0.0;
    cooling_rate = 1.0;
  }
  else{
    // Geometric cooling: reach min_temp after ncalcs/cooling_cycle steps.
    cooling_rate = pow(min_temp/max_temp, (double)cooling_cycle/ncalcs);
  }

  if(enable_outfname && rank == 0){
    struct stat stat_buf;
    if(stat(outfname, &stat_buf) == 0)
      ERROR("Output file %s exists.\n", outfname);
    if((fp = fopen(outfname, "w")) == NULL)
      ERROR("Cannot open %s\n", outfname);
  }

  output_params(degree, groups, low_length, random_seed, max_temp, min_temp, ncalcs,
		cooling_cycle, cooling_rate, infname, outfname, average_time,
		enable_hill_climbing, width, height, enable_bfs, enable_fixed_temp, fixed_temp);

  // Optimization
  timer_clear_all();
  timer_start(TIMER_SA);
  long long step = sa(nodes, lines, degree, based_nodes, ncalcs, cooling_rate, low_diam, low_ASPL, enable_bfs,
		      enable_hill_climbing, enable_detect_temp, &max_diff_energy, max_temp,
		      min_temp, fixed_temp, edge, &diam, &ASPL, cooling_cycle, &num_accepts, width,
		      based_width, height, based_height, low_length, groups, rotate_hash, enable_fixed_temp);
  timer_stop(TIMER_SA);

  if(enable_detect_temp){
    // Set max temperature to accept it 50% in maximum diff energy.
    PRINT_R0("Proposed max temperature is %f\n", (-1.0 * max_diff_energy) / log(0.5));
    // Set min temperature to accept it 0.01% in minimum diff energy.
    // NOTE(review): uses a fixed -2.0 rather than a measured minimum diff
    // energy — confirm this constant is intended.
    END("Proposed min temperature is %f\n", (-2.0) / log(0.0001));
  }

  // Output results
  PRINT_R0("---\n");
  PRINT_R0("Diam. k = %d ASPL l = %f Diam. gap = %d ASPL gap = %f\n",
	   diam, ASPL, diam-low_diam, ASPL-low_ASPL);
  double time_sa    = timer_read(TIMER_SA);
  double time_apsp  = timer_read(TIMER_APSP);
  double time_check = timer_read(TIMER_CHECK);
  PRINT_R0("Steps: %lld Elapse time: %f sec. (APSP: %f sec. Check: %f sec. Other: %f sec.)\n",
	   step, time_sa, time_apsp, time_check, time_sa-(time_apsp+time_check));
  if(ncalcs > SKIP_ACCEPTS)
    PRINT_R0("Accept rate: %f (= %lld/%lld)\n",
	     (double)num_accepts/(ncalcs-SKIP_ACCEPTS), num_accepts, ncalcs-SKIP_ACCEPTS);
  if(rank == 0 && enable_outfname){
    output_file(fp, lines, height, edge);
    fclose(fp);
  }
  verfy_graph(nodes, lines, edge, height, low_length);

  MPI_Finalize();
  return 0;
}
|
JeeIOrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#endif
#include "Particle/DistanceTableData.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for three-body Jastrow function using multiple functors
*
*Each pair-type can have distinct function \f$u(r_{ij})\f$.
*For electrons, distinct pair correlation functions are used
*for spins up-up/down-down and up-down/down-up.
*/
template<class FT>
class JeeIOrbitalSoA : public WaveFunctionComponent
{
///type of each component U, dU, d2U;
using valT = typename FT::real_type;
///element position type
using posT = TinyVector<valT, OHMMS_DIM>;
///use the same container
using RowContainer = DistanceTableData::RowContainer;
///table index for el-el
const int ee_Table_ID_;
///table index for i-el
const int ei_Table_ID_;
//nuber of particles
int Nelec, Nion;
///number of particles + padded
size_t Nelec_padded;
//number of groups of the target particleset
int eGroups, iGroups;
///reference to the sources (ions)
const ParticleSet& Ions;
///diff value
RealType DiffVal;
///\f$Uat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Uat, oldUk, newUk;
///\f$dUat[i] = sum_(j) du_{i,j}\f$
using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
gContainer_type dUat, olddUk, newdUk;
///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
Vector<valT> d2Uat, oldd2Uk, newd2Uk;
/// current values during PbyP
valT cur_Uat, cur_d2Uat;
posT cur_dUat, dUat_temp;
///container for the Jastrow functions
Array<FT*, 3> F;
std::map<std::string, FT*> J3Unique;
//YYYY
std::map<FT*, int> J3UniqueIndex;
/// the cutoff for e-I pairs
std::vector<valT> Ion_cutoff;
/// the electrons around ions within the cutoff radius, grouped by species
Array<std::vector<int>, 2> elecs_inside;
Array<std::vector<valT>, 2> elecs_inside_dist;
Array<std::vector<posT>, 2> elecs_inside_displ;
/// the ids of ions within the cutoff radius of an electron on which a move is proposed
std::vector<int> ions_nearby_old, ions_nearby_new;
/// work buffer size
size_t Nbuffer;
/// compressed distances
aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed;
std::vector<int> DistIndice_k;
/// compressed displacements
gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed;
/// work result buffer
VectorSoaContainer<valT, 9> mVGL;
// Used for evaluating derivatives with respect to the parameters
int NumVars;
Array<std::pair<int, int>, 3> VarOffset;
Vector<RealType> dLogPsi;
Array<PosType, 2> gradLogPsi;
Array<RealType, 2> lapLogPsi;
// Temporary store for parameter derivatives of functor
// The first index is the functor index in J3Unique. The second is the parameter index w.r.t. to that
// functor
std::vector<std::vector<RealType>> du_dalpha;
std::vector<std::vector<PosType>> dgrad_dalpha;
std::vector<std::vector<Tensor<RealType, 3>>> dhess_dalpha;
public:
///alias FuncType
using FuncType = FT;
  /** Constructor: registers the electron-electron and electron-ion distance
   *  tables on the electron set and sizes all per-particle storage via init().
   *  NOTE(review): is_master is accepted but unused in this body.
   */
  JeeIOrbitalSoA(const ParticleSet& ions, ParticleSet& elecs, bool is_master = false)
      : ee_Table_ID_(elecs.addTable(elecs, DT_SOA)), ei_Table_ID_(elecs.addTable(ions, DT_SOA, true)), Ions(ions), NumVars(0)
  {
    ClassName = "JeeIOrbitalSoA";
    init(elecs);
  }
~JeeIOrbitalSoA() {}
  /** Deep-copy this component for a new electron particleset.
   *  Each unique functor is cloned exactly once (tracked in fcmap) and shared
   *  across all (ion, e1, e2) slots that referenced the same original functor;
   *  addFunc re-applies the slot-filling rules on the clone.
   */
  WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const
  {
    JeeIOrbitalSoA<FT>* eeIcopy = new JeeIOrbitalSoA<FT>(Ions, elecs, false);
    std::map<const FT*, FT*> fcmap;
    for (int iG = 0; iG < iGroups; iG++)
      for (int eG1 = 0; eG1 < eGroups; eG1++)
        for (int eG2 = 0; eG2 < eGroups; eG2++)
        {
          if (F(iG, eG1, eG2) == 0)
            continue;
          // clone each distinct functor only on first encounter
          typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F(iG, eG1, eG2));
          if (fit == fcmap.end())
          {
            FT* fc = new FT(*F(iG, eG1, eG2));
            eeIcopy->addFunc(iG, eG1, eG2, fc);
            fcmap[F(iG, eG1, eG2)] = fc;
          }
        }
    // Ye: I don't like the following memory allocated by default.
    eeIcopy->myVars.clear();
    eeIcopy->myVars.insertFrom(myVars);
    eeIcopy->NumVars = NumVars;
    eeIcopy->dLogPsi.resize(NumVars);
    eeIcopy->gradLogPsi.resize(NumVars, Nelec);
    eeIcopy->lapLogPsi.resize(NumVars, Nelec);
    eeIcopy->VarOffset = VarOffset;
    eeIcopy->Optimizable = Optimizable;
    return eeIcopy;
  }
  /** Size all per-particle state and scratch buffers for the electron set p.
   *  Called from the constructor; reads ion counts/species from Ions.
   */
  void init(ParticleSet& p)
  {
    Nelec = p.getTotalNum();
    Nelec_padded = getAlignedSize<valT>(Nelec);
    Nion = Ions.getTotalNum();
    iGroups = Ions.getSpeciesSet().getTotalNum();
    eGroups = p.groups();
    // per-electron value/gradient/laplacian accumulators and the old/new
    // per-move scratch copies used by acceptMove
    Uat.resize(Nelec);
    dUat.resize(Nelec);
    d2Uat.resize(Nelec);
    oldUk.resize(Nelec);
    olddUk.resize(Nelec);
    oldd2Uk.resize(Nelec);
    newUk.resize(Nelec);
    newdUk.resize(Nelec);
    newd2Uk.resize(Nelec);
    F.resize(iGroups, eGroups, eGroups);
    F = nullptr;  // no functor assigned to any (ion, e, e) channel yet
    elecs_inside.resize(eGroups, Nion);
    elecs_inside_dist.resize(eGroups, Nion);
    elecs_inside_displ.resize(eGroups, Nion);
    ions_nearby_old.resize(Nion);
    ions_nearby_new.resize(Nion);
    Ion_cutoff.resize(Nion, 0.0);
    //initialize buffers
    Nbuffer = Nelec;
    mVGL.resize(Nbuffer);
    Distjk_Compressed.resize(Nbuffer);
    DistjI_Compressed.resize(Nbuffer);
    DistkI_Compressed.resize(Nbuffer);
    Disp_jk_Compressed.resize(Nbuffer);
    Disp_jI_Compressed.resize(Nbuffer);
    Disp_kI_Compressed.resize(Nbuffer);
    DistIndice_k.resize(Nbuffer);
  }
void initUnique()
{
typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
du_dalpha.resize(J3Unique.size());
dgrad_dalpha.resize(J3Unique.size());
dhess_dalpha.resize(J3Unique.size());
int ifunc = 0;
while (it != it_end)
{
J3UniqueIndex[it->second] = ifunc;
FT& functor = *(it->second);
int numParams = functor.getNumParameters();
du_dalpha[ifunc].resize(numParams);
dgrad_dalpha[ifunc].resize(numParams);
dhess_dalpha[ifunc].resize(numParams);
++it;
ifunc++;
}
}
  /** Register a Jastrow functor j for ion species iSpecies and the electron
   *  species pair (eSpecies1, eSpecies2).
   *  If the same-spin (0,0) channel is given, the functor fills every
   *  still-unassigned electron-pair channel (spin-unpolarized assumption).
   *  Also records half the functor cutoff as the e-I cutoff of each matching
   *  ion and refreshes the derivative workspaces. Aborts on a NULL functor.
   */
  void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j)
  {
    if (eSpecies1 == eSpecies2)
    {
      //if only up-up is specified, assume spin-unpolarized correlations
      if (eSpecies1 == 0)
        for (int eG1 = 0; eG1 < eGroups; eG1++)
          for (int eG2 = 0; eG2 < eGroups; eG2++)
          {
            if (F(iSpecies, eG1, eG2) == 0)
              F(iSpecies, eG1, eG2) = j;
          }
    }
    else
    {
      // off-diagonal channels are symmetric in the two electron species
      F(iSpecies, eSpecies1, eSpecies2) = j;
      F(iSpecies, eSpecies2, eSpecies1) = j;
    }
    if (j)
    {
      // e-I cutoff is half the functor range
      RealType rcut = 0.5 * j->cutoff_radius;
      for (int i = 0; i < Nion; i++)
        if (Ions.GroupID[i] == iSpecies)
          Ion_cutoff[i] = rcut;
    }
    else
    {
      APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL");
    }
    // key functors by the species triple so duplicates collapse in J3Unique
    std::stringstream aname;
    aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2;
    J3Unique[aname.str()] = j;
    initUnique();
  }
  /** Check that correlation information is complete.
   *  For each ion species the eGroups x eGroups channels must be either all
   *  unassigned or all assigned (aborts on a partial assignment). Then the
   *  per-ion e-I cutoffs are reset from the (0,0) channel and all channels
   *  of an ion species are required to share the same cutoff radius.
   */
  void check_complete()
  {
    //check that correlation pointers are either all 0 or all assigned
    bool complete = true;
    for (int i = 0; i < iGroups; ++i)
    {
      int nfilled = 0;
      bool partial;
      for (int e1 = 0; e1 < eGroups; ++e1)
        for (int e2 = 0; e2 < eGroups; ++e2)
          if (F(i, e1, e2) != 0)
            nfilled++;
      partial = nfilled > 0 && nfilled < eGroups * eGroups;
      if (partial)
        app_log() << "J3 eeI is missing correlation for ion " << i << std::endl;
      complete = complete && !partial;
    }
    if (!complete)
    {
      APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n see preceding messages "
                "for details");
    }
    //first set radii
    for (int i = 0; i < Nion; ++i)
    {
      FT* f = F(Ions.GroupID[i], 0, 0);
      if (f != 0)
        Ion_cutoff[i] = .5 * f->cutoff_radius;
    }
    //then check radii
    bool all_radii_match = true;
    for (int i = 0; i < iGroups; ++i)
    {
      if (F(i, 0, 0) != 0)
      {
        bool radii_match = true;
        RealType rcut = F(i, 0, 0)->cutoff_radius;
        for (int e1 = 0; e1 < eGroups; ++e1)
          for (int e2 = 0; e2 < eGroups; ++e2)
            radii_match = radii_match && F(i, e1, e2)->cutoff_radius == rcut;
        if (!radii_match)
          app_log() << "eeI functors for ion species " << i << " have different radii" << std::endl;
        all_radii_match = all_radii_match && radii_match;
      }
    }
    if (!all_radii_match)
    {
      APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n see preceding messages "
                "for details");
    }
  }
  /// No-op: distance tables are managed by the ParticleSet itself; kept only
  /// to satisfy the WaveFunctionComponent interface.
  void resetTargetParticleSet(ParticleSet& P) {}
/** check in an optimizable parameter
* @param o a super set of optimizable variables
*/
void checkInVariables(opt_variables_type& active)
{
myVars.clear();
typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
while (it != it_end)
{
(*it).second->checkInVariables(active);
(*it).second->checkInVariables(myVars);
++it;
}
}
  /** Check out optimizable variables.
   *  Collects the active-variable indices of every unique functor into
   *  myVars, then sizes the derivative buffers and records, per
   *  (ion, e1, e2) channel, the parameter offsets of that channel's functor
   *  relative to this component's first variable.
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.getIndex(active);
      myVars.insertFrom((*it).second->myVars);
      ++it;
    }
    myVars.getIndex(active);
    NumVars = myVars.size();
    if (NumVars)
    {
      dLogPsi.resize(NumVars);
      gradLogPsi.resize(NumVars, Nelec);
      lapLogPsi.resize(NumVars, Nelec);
      VarOffset.resize(iGroups, eGroups, eGroups);
      int varoffset = myVars.Index[0];
      for (int ig = 0; ig < iGroups; ig++)
        for (int jg = 0; jg < eGroups; jg++)
          for (int kg = 0; kg < eGroups; kg++)
          {
            FT* func_ijk = F(ig, jg, kg);
            if (func_ijk == nullptr)
              continue;
            // .first = offset of this functor's first parameter,
            // .second = one past its last parameter
            VarOffset(ig, jg, kg).first = func_ijk->myVars.Index.front() - varoffset;
            VarOffset(ig, jg, kg).second = func_ijk->myVars.Index.size() + VarOffset(ig, jg, kg).first;
          }
    }
  }
///reset the value of all the unique Two-Body Jastrow functions
void resetParameters(const opt_variables_type& active)
{
if (!Optimizable)
return;
typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
while (it != it_end)
{
(*it++).second->resetParameters(active);
}
for (int i = 0; i < myVars.size(); ++i)
{
int ii = myVars.Index[i];
if (ii >= 0)
myVars[i] = active[ii];
}
}
/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
while (it != it_end)
{
(*it).second->myVars.print(os);
++it;
}
}
  /** Rebuild the per-(electron-group, ion) compact lists of electrons that
   *  lie within each ion's cutoff, caching their e-I distances and
   *  displacements from the current e-I distance table.
   */
  void build_compact_list(ParticleSet& P)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    // clear all lists before refilling
    for (int iat = 0; iat < Nion; ++iat)
      for (int jg = 0; jg < eGroups; ++jg)
      {
        elecs_inside(jg, iat).clear();
        elecs_inside_dist(jg, iat).clear();
        elecs_inside_displ(jg, iat).clear();
      }
    for (int jg = 0; jg < eGroups; ++jg)
      for (int jel = P.first(jg); jel < P.last(jg); jel++)
        for (int iat = 0; iat < Nion; ++iat)
          if (eI_table.Distances[jel][iat] < Ion_cutoff[iat])
          {
            elecs_inside(jg, iat).push_back(jel);
            elecs_inside_dist(jg, iat).push_back(eI_table.Distances[jel][iat]);
            elecs_inside_displ(jg, iat).push_back(eI_table.Displacements[jel][iat]);
          }
  }
LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
{
evaluateGL(P, G, L, true);
return LogValue;
}
ValueType ratio(ParticleSet& P, int iat)
{
UpdateMode = ORB_PBYP_RATIO;
const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
cur_Uat = computeU(P, iat, P.GroupID[iat], eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);
DiffVal = Uat[iat] - cur_Uat;
return std::exp(DiffVal);
}
void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
for (int k = 0; k < ratios.size(); ++k)
ratios[k] = std::exp(Uat[VP.refPtcl] -
computeU(VP.refPS,
VP.refPtcl,
VP.refPS.GroupID[VP.refPtcl],
VP.getDistTable(ei_Table_ID_).Distances[k],
VP.getDistTable(ee_Table_ID_).Distances[k],
ions_nearby_old));
}
  /** Ratios for moving every electron of P to the single temporary position
   *  (Temp_r rows of the distance tables). The full U at the temporary
   *  position is computed once per electron group; the pairs involving
   *  electron j itself are then added back before forming the ratio.
   */
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    for (int jg = 0; jg < eGroups; ++jg)
    {
      const valT sumU = computeU(P, -1, jg, eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);
      for (int j = P.first(jg); j < P.last(jg); ++j)
      {
        // remove self-interaction
        valT Uself(0);
        for (int iat = 0; iat < Nion; ++iat)
        {
          const valT& r_Ij = eI_table.Temp_r[iat];       // temp position to ion
          const valT& r_Ik = eI_table.Distances[j][iat]; // electron j to ion
          if (r_Ij < Ion_cutoff[iat] && r_Ik < Ion_cutoff[iat])
          {
            const int ig = Ions.GroupID[iat];
            Uself += F(ig, jg, jg)->evaluate(ee_table.Temp_r[j], r_Ij, r_Ik);
          }
        }
        ratios[j] = std::exp(Uat[j] + Uself - sumU);
      }
    }
  }
GradType evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); }
  /** Ratio and gradient for a proposed move of electron iat.
   *  Evaluates value/gradient/laplacian at the temporary position (Temp_r /
   *  Temp_dr rows); per-electron contributions are stashed in newUk/newdUk/
   *  newd2Uk so a subsequent acceptMove can apply the deltas.
   */
  ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
  {
    UpdateMode = ORB_PBYP_PARTIAL;
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    computeU3(P,
              iat,
              eI_table.Temp_r.data(),
              eI_table.Temp_dr,
              ee_table.Temp_r.data(),
              ee_table.Temp_dr,
              cur_Uat,
              cur_dUat,
              cur_d2Uat,
              newUk,
              newdUk,
              newd2Uk,
              ions_nearby_new);
    DiffVal = Uat[iat] - cur_Uat;
    grad_iat += cur_dUat;
    return std::exp(DiffVal);
  }
inline void restore(int iat) {}
  /** Accept the proposed move of electron iat.
   *  Recomputes the old-position contributions (and, for ratio-only moves,
   *  the new-position derivatives), applies new-minus-old deltas to every
   *  electron's accumulators, and updates the compact e-I neighbor lists to
   *  reflect the accepted position.
   */
  void acceptMove(ParticleSet& P, int iat)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    // get the old value, grad, lapl
    computeU3(P,
              iat,
              eI_table.Distances[iat],
              eI_table.Displacements[iat],
              ee_table.Distances[iat],
              ee_table.Displacements[iat],
              Uat[iat],
              dUat_temp,
              d2Uat[iat],
              oldUk,
              olddUk,
              oldd2Uk,
              ions_nearby_old);
    if (UpdateMode == ORB_PBYP_RATIO)
    { //ratio-only during the move; need to compute derivatives
      computeU3(P,
                iat,
                eI_table.Temp_r.data(),
                eI_table.Temp_dr,
                ee_table.Temp_r.data(),
                ee_table.Temp_dr,
                cur_Uat,
                cur_dUat,
                cur_d2Uat,
                newUk,
                newdUk,
                newd2Uk,
                ions_nearby_new);
    }
    // apply the new-minus-old pair deltas to all other electrons
#pragma omp simd
    for (int jel = 0; jel < Nelec; jel++)
    {
      Uat[jel] += newUk[jel] - oldUk[jel];
      d2Uat[jel] += newd2Uk[jel] - oldd2Uk[jel];
    }
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      valT* restrict save_g      = dUat.data(idim);
      const valT* restrict new_g = newdUk.data(idim);
      const valT* restrict old_g = olddUk.data(idim);
#pragma omp simd aligned(save_g, new_g, old_g)
      for (int jel = 0; jel < Nelec; jel++)
        save_g[jel] += new_g[jel] - old_g[jel];
    }
    // overwrite the moved electron's own entries with the candidate values
    LogValue += Uat[iat] - cur_Uat;
    Uat[iat]   = cur_Uat;
    dUat(iat)  = cur_dUat;
    d2Uat[iat] = cur_d2Uat;
    const int ig = P.GroupID[iat];
    // update compact list elecs_inside
    // if the old position exists in elecs_inside
    for (int iind = 0; iind < ions_nearby_old.size(); iind++)
    {
      int jat        = ions_nearby_old[iind];
      auto iter      = std::find(elecs_inside(ig, jat).begin(), elecs_inside(ig, jat).end(), iat);
      auto iter_dist = elecs_inside_dist(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter);
      auto iter_displ = elecs_inside_displ(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter);
      if (eI_table.Temp_r[jat] < Ion_cutoff[jat]) // the new position is still inside
      {
        *iter_dist  = eI_table.Temp_r[jat];
        *iter_displ = eI_table.Temp_dr[jat];
        // mark this ion handled so the insertion loop below skips it
        *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1;
      }
      else
      {
        // swap-and-pop removal from all three parallel lists
        *iter = elecs_inside(ig, jat).back();
        elecs_inside(ig, jat).pop_back();
        *iter_dist = elecs_inside_dist(ig, jat).back();
        elecs_inside_dist(ig, jat).pop_back();
        *iter_displ = elecs_inside_displ(ig, jat).back();
        elecs_inside_displ(ig, jat).pop_back();
      }
    }
    // if the old position doesn't exist in elecs_inside but the new position do
    for (int iind = 0; iind < ions_nearby_new.size(); iind++)
    {
      int jat = ions_nearby_new[iind];
      if (jat >= 0)
      {
        elecs_inside(ig, jat).push_back(iat);
        elecs_inside_dist(ig, jat).push_back(eI_table.Temp_r[jat]);
        elecs_inside_displ(ig, jat).push_back(eI_table.Temp_dr[jat]);
      }
    }
  }
  /** Recompute all per-electron accumulators from scratch.
   *  For each electron jel, computeU3 with triangle=true only visits pairs
   *  with k < jel; jel's own totals are stored directly and the k-side
   *  partial sums are added to the earlier electrons, so every pair is
   *  counted exactly once.
   */
  inline void recompute(ParticleSet& P)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    build_compact_list(P);
    for (int jel = 0; jel < Nelec; ++jel)
    {
      computeU3(P,
                jel,
                eI_table.Distances[jel],
                eI_table.Displacements[jel],
                ee_table.Distances[jel],
                ee_table.Displacements[jel],
                Uat[jel],
                dUat_temp,
                d2Uat[jel],
                newUk,
                newdUk,
                newd2Uk,
                ions_nearby_new,
                true);
      dUat(jel) = dUat_temp;
      // add the contribution from the upper triangle
#pragma omp simd
      for (int kel = 0; kel < jel; kel++)
      {
        Uat[kel] += newUk[kel];
        d2Uat[kel] += newd2Uk[kel];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g      = dUat.data(idim);
        const valT* restrict new_g = newdUk.data(idim);
#pragma omp simd aligned(save_g, new_g)
        for (int kel = 0; kel < jel; kel++)
          save_g[kel] += new_g[kel];
      }
    }
  }
  /** Accumulate the three-body U for an electron of group jg at a position
   *  described by distance rows distjI (to ions) and distjk (to electrons).
   *  Triples are gathered into compressed buffers and flushed to the
   *  species-specific functor's evaluateV when the buffer fills or the ion
   *  species changes.
   *  @param jel electron to exclude from pairing (pass -1 to exclude none,
   *             as done by evaluateRatiosAlltoOne)
   *  @param ions_nearby out: indices of ions within cutoff of the position
   *  @return the accumulated U value
   */
  inline valT computeU(const ParticleSet& P,
                       int jel,
                       int jg,
                       const RealType* distjI,
                       const RealType* distjk,
                       std::vector<int>& ions_nearby)
  {
    ions_nearby.clear();
    for (int iat = 0; iat < Nion; ++iat)
      if (distjI[iat] < Ion_cutoff[iat])
        ions_nearby.push_back(iat);
    valT Uj = valT(0);
    for (int kg = 0; kg < eGroups; ++kg)
    {
      int kel_counter = 0;
      for (int iind = 0; iind < ions_nearby.size(); ++iind)
      {
        const int iat  = ions_nearby[iind];
        const int ig   = Ions.GroupID[iat];
        const valT r_jI = distjI[iat];
        for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
        {
          const int kel = elecs_inside(kg, iat)[kind];
          if (kel != jel)
          {
            // pack one (j, k, I) triple into the compressed buffers
            DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind];
            Distjk_Compressed[kel_counter] = distjk[kel];
            DistjI_Compressed[kel_counter] = r_jI;
            kel_counter++;
            if (kel_counter == Nbuffer)
            {
              // buffer full: flush through the functor of this ion species
              const FT& feeI(*F(ig, jg, kg));
              Uj += feeI.evaluateV(kel_counter,
                                   Distjk_Compressed.data(),
                                   DistjI_Compressed.data(),
                                   DistkI_Compressed.data());
              kel_counter = 0;
            }
          }
        }
        // flush at the end of each ion-species run (functors are per species)
        if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0)
        {
          const FT& feeI(*F(ig, jg, kg));
          Uj +=
              feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data());
          kel_counter = 0;
        }
      }
    }
    return Uj;
  }
  /** Batched evaluation core: for kel_counter compressed (j, k, I) triples,
   *  evaluate the functor's value/gradient/Hessian components and accumulate
   *  value, gradient and laplacian terms for electron j (Uj, dUj, d2Uj) and
   *  for each partner electron k (Uk, dUk, d2Uk; slots mapped back via
   *  DistIndice_k). The mVGL rows and displacement buffers are recycled in
   *  place — see the inline notes.
   */
  inline void computeU3_engine(const ParticleSet& P,
                               const FT& feeI,
                               int kel_counter,
                               valT& Uj,
                               posT& dUj,
                               valT& d2Uj,
                               Vector<valT>& Uk,
                               gContainer_type& dUk,
                               Vector<valT>& d2Uk)
  {
    constexpr valT czero(0);
    constexpr valT cone(1);
    constexpr valT ctwo(2);
    constexpr valT lapfac = OHMMS_DIM - cone;
    // name the nine mVGL work rows: value, three gradients, five Hessians
    valT* restrict val     = mVGL.data(0);
    valT* restrict gradF0  = mVGL.data(1);
    valT* restrict gradF1  = mVGL.data(2);
    valT* restrict gradF2  = mVGL.data(3);
    valT* restrict hessF00 = mVGL.data(4);
    valT* restrict hessF11 = mVGL.data(5);
    valT* restrict hessF22 = mVGL.data(6);
    valT* restrict hessF01 = mVGL.data(7);
    valT* restrict hessF02 = mVGL.data(8);
    feeI.evaluateVGL(kel_counter,
                     Distjk_Compressed.data(),
                     DistjI_Compressed.data(),
                     DistkI_Compressed.data(),
                     val,
                     gradF0,
                     gradF1,
                     gradF2,
                     hessF00,
                     hessF11,
                     hessF22,
                     hessF01,
                     hessF02);
    // compute the contribution to jel, kel
    Uj               = simd::accumulate_n(val, kel_counter, Uj);
    valT gradF0_sum  = simd::accumulate_n(gradF0, kel_counter, czero);
    valT gradF1_sum  = simd::accumulate_n(gradF1, kel_counter, czero);
    valT hessF00_sum = simd::accumulate_n(hessF00, kel_counter, czero);
    valT hessF11_sum = simd::accumulate_n(hessF11, kel_counter, czero);
    d2Uj -= hessF00_sum + hessF11_sum + lapfac * (gradF0_sum + gradF1_sum);
    std::fill_n(hessF11, kel_counter, czero); // hessF11 is recycled below
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      valT* restrict jk = Disp_jk_Compressed.data(idim);
      valT* restrict jI = Disp_jI_Compressed.data(idim);
      valT* restrict kI = Disp_kI_Compressed.data(idim);
      valT dUj_x(0);
#pragma omp simd aligned(gradF0, gradF1, gradF2, hessF11, jk, jI, kI) reduction(+ : dUj_x)
      for (int kel_index = 0; kel_index < kel_counter; kel_index++)
      {
        // recycle hessF11
        hessF11[kel_index] += kI[kel_index] * jk[kel_index];
        dUj_x += gradF1[kel_index] * jI[kel_index];
        // destroy jk, kI
        const valT temp = jk[kel_index] * gradF0[kel_index];
        dUj_x += temp;
        jk[kel_index] *= jI[kel_index];
        kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp;
      }
      dUj[idim] += dUj_x;
      valT* restrict jk0 = Disp_jk_Compressed.data(0);
      if (idim > 0)
      {
        // fold the jk*jI dot-product partial into row 0
#pragma omp simd aligned(jk, jk0)
        for (int kel_index = 0; kel_index < kel_counter; kel_index++)
          jk0[kel_index] += jk[kel_index];
      }
      // scatter the k-side gradient contributions back to their electrons
      valT* restrict dUk_x = dUk.data(idim);
      for (int kel_index = 0; kel_index < kel_counter; kel_index++)
        dUk_x[DistIndice_k[kel_index]] += kI[kel_index];
    }
    valT sum(0);
    valT* restrict jk0 = Disp_jk_Compressed.data(0);
#pragma omp simd aligned(jk0, hessF01) reduction(+ : sum)
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
      sum += hessF01[kel_index] * jk0[kel_index];
    d2Uj -= ctwo * sum;
    // assemble the k-side laplacian terms (hessF00 is overwritten in place)
#pragma omp simd aligned(hessF00, hessF22, gradF0, gradF2, hessF02, hessF11)
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
      hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index] + lapfac * (gradF0[kel_index] + gradF2[kel_index]) -
          ctwo * hessF02[kel_index] * hessF11[kel_index];
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    {
      const int kel = DistIndice_k[kel_index];
      Uk[kel] += val[kel_index];
      d2Uk[kel] -= hessF00[kel_index];
    }
  }
  /** Compute value, gradient and laplacian contributions for electron jel
   *  (Uj, dUj, d2Uj) and for its partner electrons (Uk, dUk, d2Uk) from all
   *  e-e-I triples inside the ion cutoffs. Triples are gathered into the
   *  compressed buffers and flushed through computeU3_engine when the buffer
   *  fills or the ion species changes.
   *  @param triangle when true, only partners k < jel are included (used by
   *                  recompute to count each pair once)
   *  @param ions_nearby out: indices of ions within cutoff of jel's position
   */
  inline void computeU3(const ParticleSet& P,
                        int jel,
                        const RealType* distjI,
                        const RowContainer& displjI,
                        const RealType* distjk,
                        const RowContainer& displjk,
                        valT& Uj,
                        posT& dUj,
                        valT& d2Uj,
                        Vector<valT>& Uk,
                        gContainer_type& dUk,
                        Vector<valT>& d2Uk,
                        std::vector<int>& ions_nearby,
                        bool triangle = false)
  {
    constexpr valT czero(0);
    Uj   = czero;
    dUj  = posT();
    d2Uj = czero;
    const int jg = P.GroupID[jel];
    const int kelmax = triangle ? jel : Nelec;
    // zero the per-partner accumulators that will be filled below
    std::fill_n(Uk.data(), kelmax, czero);
    std::fill_n(d2Uk.data(), kelmax, czero);
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
      std::fill_n(dUk.data(idim), kelmax, czero);
    ions_nearby.clear();
    for (int iat = 0; iat < Nion; ++iat)
      if (distjI[iat] < Ion_cutoff[iat])
        ions_nearby.push_back(iat);
    for (int kg = 0; kg < eGroups; ++kg)
    {
      int kel_counter = 0;
      for (int iind = 0; iind < ions_nearby.size(); ++iind)
      {
        const int iat     = ions_nearby[iind];
        const int ig      = Ions.GroupID[iat];
        const valT r_jI   = distjI[iat];
        const posT disp_Ij = displjI[iat];
        for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
        {
          const int kel = elecs_inside(kg, iat)[kind];
          if (kel < kelmax && kel != jel)
          {
            // pack one (j, k, I) triple: distances, displacements, k index
            DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind];
            DistjI_Compressed[kel_counter] = r_jI;
            Distjk_Compressed[kel_counter] = distjk[kel];
            Disp_kI_Compressed(kel_counter) = elecs_inside_displ(kg, iat)[kind];
            Disp_jI_Compressed(kel_counter) = disp_Ij;
            Disp_jk_Compressed(kel_counter) = displjk[kel];
            DistIndice_k[kel_counter] = kel;
            kel_counter++;
            if (kel_counter == Nbuffer)
            {
              const FT& feeI(*F(ig, jg, kg));
              computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
              kel_counter = 0;
            }
          }
        }
        // flush at the end of each ion-species run (functors are per species)
        if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0)
        {
          const FT& feeI(*F(ig, jg, kg));
          computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
          kel_counter = 0;
        }
      }
    }
  }
/** Make Uat/dUat/d2Uat live inside the walker buffer.
 *  On the first call the three arrays are appended to the buffer and the
 *  local allocations are released; on subsequent calls the buffer cursor
 *  simply skips over this object's slice.
 */
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
if (Bytes_in_WFBuffer != 0)
{
// already registered: advance the cursor past our slice
buf.forward(Bytes_in_WFBuffer);
return;
}
const auto start = buf.current();
buf.add(Uat.begin(), Uat.end());
buf.add(dUat.data(), dUat.end());
buf.add(d2Uat.begin(), d2Uat.end());
Bytes_in_WFBuffer = buf.current() - start;
// the buffer now owns the storage; drop the local copies
Uat.free();
dUat.free();
d2Uat.free();
}
/** Refresh G/L contributions and advance the walker-buffer cursor past
 *  this object's slice (the data itself already lives in the buffer, see
 *  registerData).  Returns the current log value.
 *  NOTE(review): the fromscratch argument is ignored; evaluateGL is always
 *  called with fromscratch=false — confirm this is intended.
 */
inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
evaluateGL(P, P.G, P.L, false);
buf.forward(Bytes_in_WFBuffer);
return LogValue;
}
/** Re-attach Uat/dUat/d2Uat to this walker's slice of the buffer and
 *  rebuild the compact ion-electron neighbor lists.
 *  The lend order (Uat, dUat, d2Uat) must mirror the add order used in
 *  registerData, otherwise the views alias the wrong data.
 */
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded * OHMMS_DIM));
d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
build_compact_list(P);
}
/** Add this term's gradient/laplacian into G and L and refresh LogValue
 *  (= -0.5 * sum of the per-electron U values).  When fromscratch is set
 *  the per-electron tables are recomputed first.
 */
void evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L,
bool fromscratch = false)
{
if (fromscratch)
recompute(P);
valT u_sum(0);
for (int iel = 0; iel < Nelec; ++iel)
{
u_sum += Uat[iel];
G[iel] += dUat[iel];
L[iel] += d2Uat[iel];
}
LogValue = -u_sum * 0.5;
}
/** Parameter derivatives of log(psi) and of H psi / psi for the optimizer.
 *
 *  For every (ion, j-electron, k-electron) triplet with kel < jel, the
 *  functor's derivatives w.r.t. its variational parameters are evaluated
 *  and scattered into dLogPsi, gradLogPsi and lapLogPsi; these are then
 *  contracted with P.G into dlogpsi / dhpsioverpsi for the active
 *  parameters only.
 *
 *  @param P             particle set
 *  @param optvars       active variational parameters
 *  @param dlogpsi       out: d log(psi) / d alpha
 *  @param dhpsioverpsi  out: d (H psi / psi) / d alpha
 */
void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<ValueType>& dlogpsi,
std::vector<ValueType>& dhpsioverpsi)
{
// only do the expensive triple loop if at least one owned parameter is active
bool recalculate(false);
std::vector<bool> rcsingles(myVars.size(), false);
for (int k = 0; k < myVars.size(); ++k)
{
int kk = myVars.where(k);
if (kk < 0)
continue;
if (optvars.recompute(kk))
recalculate = true;
rcsingles[k] = true;
}
if (recalculate)
{
constexpr valT czero(0);
constexpr valT cone(1);
constexpr valT cminus(-1);
constexpr valT ctwo(2);
constexpr valT lapfac = OHMMS_DIM - cone;
const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
build_compact_list(P);
dLogPsi = czero;
gradLogPsi = PosType();
lapLogPsi = czero;
for (int iat = 0; iat < Nion; ++iat)
{
const int ig = Ions.GroupID[iat];
for (int jg = 0; jg < eGroups; ++jg)
for (int jind = 0; jind < elecs_inside(jg, iat).size(); jind++)
{
const int jel = elecs_inside(jg, iat)[jind];
const valT r_Ij = elecs_inside_dist(jg, iat)[jind];
// stored displacements point ion->electron; flip the sign here
const posT disp_Ij = cminus * elecs_inside_displ(jg, iat)[jind];
const valT r_Ij_inv = cone / r_Ij;
for (int kg = 0; kg < eGroups; ++kg)
for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
{
const int kel = elecs_inside(kg, iat)[kind];
// kel < jel visits each unordered pair exactly once
if (kel < jel)
{
const valT r_Ik = elecs_inside_dist(kg, iat)[kind];
const posT disp_Ik = cminus * elecs_inside_displ(kg, iat)[kind];
const valT r_Ik_inv = cone / r_Ik;
const valT r_jk = ee_table.Distances[jel][kel];
const posT disp_jk = ee_table.Displacements[jel][kel];
const valT r_jk_inv = cone / r_jk;
FT& func = *F(ig, jg, kg);
int idx = J3UniqueIndex[F(ig, jg, kg)];
func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx], dgrad_dalpha[idx], dhess_dalpha[idx]);
// [first, last) selects this functor's parameters in the global vector
int first = VarOffset(ig, jg, kg).first;
int last = VarOffset(ig, jg, kg).second;
std::vector<RealType>& dlog = du_dalpha[idx];
std::vector<PosType>& dgrad = dgrad_dalpha[idx];
std::vector<Tensor<RealType, 3>>& dhess = dhess_dalpha[idx];
for (int p = first, ip = 0; p < last; p++, ip++)
{
RealType& dval = dlog[ip];
PosType& dg = dgrad[ip];
Tensor<RealType, 3>& dh = dhess[ip];
// convert radial derivatives into Cartesian factors
dg[0] *= r_jk_inv;
dg[1] *= r_Ij_inv;
dg[2] *= r_Ik_inv;
PosType gr_ee = dg[0] * disp_jk;
gradLogPsi(p, jel) -= dg[1] * disp_Ij - gr_ee;
lapLogPsi(p, jel) -=
(dh(0, 0) + lapfac * dg[0] - ctwo * dh(0, 1) * dot(disp_jk, disp_Ij) * r_jk_inv * r_Ij_inv +
dh(1, 1) + lapfac * dg[1]);
gradLogPsi(p, kel) -= dg[2] * disp_Ik + gr_ee;
lapLogPsi(p, kel) -=
(dh(0, 0) + lapfac * dg[0] + ctwo * dh(0, 2) * dot(disp_jk, disp_Ik) * r_jk_inv * r_Ik_inv +
dh(2, 2) + lapfac * dg[2]);
dLogPsi[p] -= dval;
}
}
}
}
}
// contract per-parameter grids into the output vectors (active params only)
for (int k = 0; k < myVars.size(); ++k)
{
int kk = myVars.where(k);
if (kk < 0)
continue;
dlogpsi[kk] = (ValueType)dLogPsi[k];
RealType sum = 0.0;
for (int i = 0; i < Nelec; i++)
{
#if defined(QMC_COMPLEX)
sum -= 0.5 * lapLogPsi(k, i);
for (int jdim = 0; jdim < OHMMS_DIM; ++jdim)
sum -= P.G[i][jdim].real() * gradLogPsi(k, i)[jdim];
#else
sum -= 0.5 * lapLogPsi(k, i) + dot(P.G[i], gradLogPsi(k, i));
#endif
}
dhpsioverpsi[kk] = (ValueType)sum;
}
}
}
};
} // namespace qmcplusplus
#endif
|
ccsd_t.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <complex.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
/*
 * One (a,b,c) task for the threaded (T) drivers: the virtual-orbital
 * triple plus pointers to the six vv_op cache slices the contraction
 * kernels consume (slice order matches the get_wv call sequence in
 * contract6/zcontract6).
 */
typedef struct {
void *cache[6];
short a;
short b;
short c;
short _padding;  /* explicit pad so the shorts fill a full word after cache[] */
} CacheJob;
/*
* 4 * w + w.transpose(1,2,0) + w.transpose(2,0,1)
* - 2 * w.transpose(2,1,0) - 2 * w.transpose(0,2,1)
* - 2 * w.transpose(1,0,2)
*/
/*
 * out = 4*v + v.transpose(1,2,0) + v.transpose(2,0,1)
 *     - 2*v.transpose(2,1,0) - 2*v.transpose(0,2,1) - 2*v.transpose(1,0,2)
 * where v is first updated in place as v += w.  All three buffers hold
 * n*n*n doubles; out is fully overwritten.
 */
static void add_and_permute(double *out, double *w, double *v, int n)
{
        const int nn = n * n;
        const int nnn = nn * n;
        /* fold w into v so the six permutations below read one array */
        for (int m = 0; m < nnn; m++) {
                v[m] += w[m];
        }
        for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
        for (int k = 0; k < n; k++) {
                const int ijk = i*nn + j*n + k;
                out[ijk] = v[ijk] * 4
                         + v[j*nn+k*n+i]
                         + v[k*nn+i*n+j]
                         - v[k*nn+j*n+i] * 2
                         - v[i*nn+k*n+j] * 2
                         - v[j*nn+i*n+k] * 2;
        } } }
}
/*
* t2T = t2.transpose(2,3,1,0)
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
* v+= numpy.einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
* v+= w
*/
/*
 * Accumulate one permutation (a,b,c) into the w and v cubes, implementing
 * the einsum block in the comment above:
 *   w += einsum('if,fjk->ijk', vv_op[:,nocc:], t2T[c])
 *      - einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
 *   v += einsum('ij,k->ijk', vv_op[:,:nocc], t1T[c]*.5)
 *      + einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)  (halves precomputed)
 * Results are scattered through idx, which maps natural (i,j,k) order to
 * the permuted layout.  cache supplies nocc^3 doubles of dgemm scratch.
 */
static void get_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1Thalf, double *t2T,
int nocc, int nvir, int a, int b, int c, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 =-1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2T;
/* first einsum: ov block of vv_op contracted with t2T[c] */
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
/* second einsum subtracted via DN1 with beta=1 (accumulates into cache) */
dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T + b * nvoo + a * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
/*
 * Point-group-symmetry-adapted variant of get_wv: the two full dgemm
 * contractions are replaced by one dgemm per symmetry block, visiting only
 * blocks whose combined irrep matches (fr = ir ^ ab_irrep etc.), with the
 * results scattered back to natural (i,j,k) positions via idx.  The final
 * t1/fov outer-product term is identical to get_wv.
 * o_ir_loc / v_ir_loc / oo_ir_loc give the offsets of each irrep's
 * occupied / virtual / occupied-pair ranges; orbsym lists orbital irreps.
 */
static void sym_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1Thalf, double *t2T,
int nocc, int nvir, int a, int b, int c, int nirrep,
int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym,
int *idx)
{
const double D0 = 0;
const double D1 = 1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int a_irrep = orbsym[nocc+a];
int b_irrep = orbsym[nocc+b];
int c_irrep = orbsym[nocc+c];
int ab_irrep = a_irrep ^ b_irrep;
int bc_irrep = c_irrep ^ b_irrep;
int i, j, k, n;
int fr, f0, f1, df, mr, m0, m1, dm, mk0;
int ir, i0, i1, di, kr, k0, k1, dk, jr;
int ijr, ij0, ij1, dij, jkr, jk0, jk1, djk;
double *pt2T;
/* symmetry adapted
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c]) */
pt2T = t2T + c * nvoo;
for (ir = 0; ir < nirrep; ir++) {
i0 = o_ir_loc[ir];
i1 = o_ir_loc[ir+1];
di = i1 - i0;
if (di > 0) {
fr = ir ^ ab_irrep;
f0 = v_ir_loc[fr];
f1 = v_ir_loc[fr+1];
df = f1 - f0;
if (df > 0) {
jkr = fr ^ c_irrep;
jk0 = oo_ir_loc[jkr];
jk1 = oo_ir_loc[jkr+1];
djk = jk1 - jk0;
if (djk > 0) {
/* one block gemm per surviving (ir, fr, jkr) combination */
dgemm_(&TRANS_N, &TRANS_N, &djk, &di, &df,
&D1, pt2T+f0*noo+jk0, &noo, vv_op+i0*nmo+nocc+f0, &nmo,
&D0, cache, &djk);
/* scatter the packed block result into natural (i,j,k) order */
for (n = 0, i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (jr = 0; jr < nirrep; jr++) {
kr = jkr ^ jr;
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[idx[i*noo+j*nocc+k]] += cache[n];
} }
} }
}
}
}
}
/* symmetry adapted
* w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b]) */
pt2T = t2T + c * nvoo + b * noo;
vooo += a * nooo;
mk0 = oo_ir_loc[bc_irrep];
for (mr = 0; mr < nirrep; mr++) {
m0 = o_ir_loc[mr];
m1 = o_ir_loc[mr+1];
dm = m1 - m0;
if (dm > 0) {
kr = mr ^ bc_irrep;
k0 = o_ir_loc[kr];
k1 = o_ir_loc[kr+1];
dk = k1 - k0;
if (dk > 0) {
ijr = mr ^ a_irrep;
ij0 = oo_ir_loc[ijr];
ij1 = oo_ir_loc[ijr+1];
dij = ij1 - ij0;
if (dij > 0) {
dgemm_(&TRANS_N, &TRANS_N, &dk, &dij, &dm,
&D1, pt2T+mk0, &dk, vooo+ij0*nocc+m0, &nocc,
&D0, cache, &dk);
/* subtracted scatter, mirroring the gemm above */
for (n = 0, ir = 0; ir < nirrep; ir++) {
jr = ijr ^ ir;
for (i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[idx[i*noo+j*nocc+k]] -= cache[n];
} }
} }
}
/* advance into the packed (m,k) storage of t2T[c,b] */
mk0 += dm * dk;
}
}
}
/* t1/fov outer-product term: identical to the tail of get_wv */
pt2T = t2T + b * nvoo + a * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
/*
 * Energy contribution of one (a,b,c) triple:
 *   et = fac * sum_ijk w[ijk] * v[ijk] / (e_i + e_j + e_k - e_a - e_b - e_c)
 * w and v are nocc^3 cubes; mo_energy holds occupied then virtual levels.
 */
double _ccsd_t_get_energy(double *w, double *v, double *mo_energy, int nocc,
                          int a, int b, int c, double fac)
{
        const double e_abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
        double et = 0;
        int n = 0;
        for (int i = 0; i < nocc; i++) {
                for (int j = 0; j < nocc; j++) {
                        const double e_ij = mo_energy[i] + mo_energy[j];
                        for (int k = 0; k < nocc; k++, n++) {
                                et += fac * w[n] * v[n] / (e_ij + mo_energy[k] - e_abc);
                        }
                }
        }
        return et;
}
/*
 * Assemble the w/v cubes for triple (a,b,c) from all six index
 * permutations and return its (T) energy contribution.  Dispatches to the
 * symmetry-adapted kernel when more than one irrep is present.  cache1
 * must provide at least 3*nocc^3 doubles of scratch; cache holds the six
 * vv_op slices in the permutation order below.
 */
static double contract6(int nocc, int nvir, int a, int b, int c,
                        double *mo_energy, double *t1T, double *t2T,
                        int nirrep, int *o_ir_loc, int *v_ir_loc,
                        int *oo_ir_loc, int *orbsym, double *fvo,
                        double *vooo, double *cache1, void **cache,
                        int *permute_idx)
{
        const int nooo = nocc * nocc * nocc;
        int *idx[6];
        int i;
        for (i = 0; i < 6; i++) {
                idx[i] = permute_idx + i * nooo;
        }
        double *v0 = cache1;
        double *w0 = v0 + nooo;
        double *z0 = w0 + nooo;
        double *wtmp = z0;  /* the z0 region doubles as gemm scratch */
        for (i = 0; i < nooo; i++) {
                w0[i] = 0;
                v0[i] = 0;
        }
        if (nirrep == 1) {
                get_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx[0]);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx[1]);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx[2]);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx[3]);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx[4]);
                get_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx[5]);
        } else {
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[0]);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[1]);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[2]);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[3]);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[4]);
                sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[5]);
        }
        add_and_permute(z0, w0, v0, nocc);
        /* degenerate triples are over-counted by the permutation sum */
        double fac = 1.;
        if (a == c) {
                fac = 1./6;
        } else if (a == b || b == c) {
                fac = .5;
        }
        return _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, fac);
}
/*
 * Populate the CacheJob list for the slice [a0:a1, b0:b1] (c runs below b)
 * and attach the six vv_op cache slices each triple consumes.  When
 * b1 <= a0 the a- and b-slices are disjoint; otherwise they coincide and
 * only unique a >= b >= c triples are generated.  Returns the job count.
 *
 * Fixes: pointer arithmetic was performed directly on void* (a GNU
 * extension, invalid in ISO C) — the offsets are now computed on char*;
 * nov is computed in size_t so nocc*(nocc+nvir)*stride cannot overflow int.
 */
size_t _ccsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
                        int a0, int a1, int b0, int b1,
                        void *cache_row_a, void *cache_col_a,
                        void *cache_row_b, void *cache_col_b, size_t stride)
{
        size_t nov = (size_t)nocc * (nocc+nvir) * stride;
        char *row_a = cache_row_a;
        char *col_a = cache_col_a;
        char *row_b = cache_row_b;
        char *col_b = cache_col_b;
        int da = a1 - a0;
        int db = b1 - b0;
        size_t m, a, b, c;
        if (b1 <= a0) {
                /* disjoint a/b slices */
                m = 0;
                for (a = a0; a < a1; a++) {
                for (b = b0; b < b1; b++) {
                        for (c = 0; c < b0; c++, m++) {
                                jobs[m].a = a;
                                jobs[m].b = b;
                                jobs[m].c = c;
                                jobs[m].cache[0] = row_a + nov*(a1*(a-a0)+b );
                                jobs[m].cache[1] = row_a + nov*(a1*(a-a0)+c );
                                jobs[m].cache[2] = col_a + nov*(da*(b) +a-a0);
                                jobs[m].cache[3] = row_b + nov*(b1*(b-b0)+c );
                                jobs[m].cache[4] = col_a + nov*(da*(c) +a-a0);
                                jobs[m].cache[5] = col_b + nov*(db*(c) +b-b0);
                        }
                        for (c = b0; c <= b; c++, m++) {
                                jobs[m].a = a;
                                jobs[m].b = b;
                                jobs[m].c = c;
                                jobs[m].cache[0] = row_a + nov*(a1*(a-a0)+b );
                                jobs[m].cache[1] = row_a + nov*(a1*(a-a0)+c );
                                jobs[m].cache[2] = col_a + nov*(da*(b) +a-a0);
                                jobs[m].cache[3] = row_b + nov*(b1*(b-b0)+c );
                                jobs[m].cache[4] = col_a + nov*(da*(c) +a-a0);
                                jobs[m].cache[5] = row_b + nov*(b1*(c-b0)+b );
                        }
                } }
        } else {
                /* a and b drawn from the same slice: b <= a */
                m = 0;
                for (a = a0; a < a1; a++) {
                for (b = a0; b <= a; b++) {
                        for (c = 0; c < a0; c++, m++) {
                                jobs[m].a = a;
                                jobs[m].b = b;
                                jobs[m].c = c;
                                jobs[m].cache[0] = row_a + nov*(a1*(a-a0)+b);
                                jobs[m].cache[1] = row_a + nov*(a1*(a-a0)+c);
                                jobs[m].cache[2] = row_a + nov*(a1*(b-a0)+a);
                                jobs[m].cache[3] = row_a + nov*(a1*(b-a0)+c);
                                jobs[m].cache[4] = col_a + nov*(da*(c)+a-a0);
                                jobs[m].cache[5] = col_a + nov*(da*(c)+b-a0);
                        }
                        for (c = a0; c <= b; c++, m++) {
                                jobs[m].a = a;
                                jobs[m].b = b;
                                jobs[m].c = c;
                                jobs[m].cache[0] = row_a + nov*(a1*(a-a0)+b);
                                jobs[m].cache[1] = row_a + nov*(a1*(a-a0)+c);
                                jobs[m].cache[2] = row_a + nov*(a1*(b-a0)+a);
                                jobs[m].cache[3] = row_a + nov*(a1*(b-a0)+c);
                                jobs[m].cache[4] = row_a + nov*(a1*(c-a0)+a);
                                jobs[m].cache[5] = row_a + nov*(a1*(c-a0)+b);
                        }
                } }
        }
        return m;
}
/*
 * Fill idx (6*n^3 ints) with six scatter tables, one per permutation of
 * the (i,j,k) axes, so that table[t][m] gives the linear destination of
 * the m-th natural-order element under permutation t:
 *   0: identity  1: (i,k,j)  2: (j,i,k)  3: (k,i,j)  4: (j,k,i)  5: (k,j,i)
 */
void _make_permute_indices(int *idx, int n)
{
        const int nn = n * n;
        const int nnn = nn * n;
        int *tab[6];
        for (int t = 0; t < 6; t++) {
                tab[t] = idx + t * nnn;
        }
        int m = 0;
        for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
        for (int k = 0; k < n; k++, m++) {
                tab[0][m] = i * nn + j * n + k;
                tab[1][m] = i * nn + k * n + j;
                tab[2][m] = j * nn + i * n + k;
                tab[3][m] = k * nn + i * n + j;
                tab[4][m] = j * nn + k * n + i;
                tab[5][m] = k * nn + j * n + i;
        } } }
}
/*
 * Sum the (T) energy over all (a,b,c) jobs of the [a0:a1, b0:b1] slice and
 * accumulate it into *e_tot.  Each OpenMP thread keeps private halved
 * copies of t1T/fvo and a 3*nocc^3 scratch cube, then reduces its partial
 * energy under a critical section.
 *
 * Fix: the job list allocated here was never released — free(jobs) added
 * (memory leak on every call).
 */
void CCsd_t_contract(double *e_tot,
                     double *mo_energy, double *t1T, double *t2T,
                     double *vooo, double *fvo,
                     int nocc, int nvir, int a0, int a1, int b0, int b1,
                     int nirrep, int *o_ir_loc, int *v_ir_loc,
                     int *oo_ir_loc, int *orbsym,
                     void *cache_row_a, void *cache_col_a,
                     void *cache_row_b, void *cache_col_b)
{
        int da = a1 - a0;
        int db = b1 - b0;
        CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
        size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                        cache_row_a, cache_col_a,
                                        cache_row_b, cache_col_b, sizeof(double));
        int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
        _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
        int a, b, c;
        size_t k;
        double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
        /* per-thread halved amplitudes: t1T/2 and fvo/2 share one allocation */
        double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
        double *fvohalf = t1Thalf + nvir*nocc;
        for (k = 0; k < nvir*nocc; k++) {
                t1Thalf[k] = t1T[k] * .5;
                fvohalf[k] = fvo[k] * .5;
        }
        double e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
                a = jobs[k].a;
                b = jobs[k].b;
                c = jobs[k].c;
                e += contract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
                               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                               fvohalf, vooo, cache1, jobs[k].cache, permute_idx);
        }
        free(t1Thalf);
        free(cache1);
#pragma omp critical
        *e_tot += e;
}
        free(jobs);      /* fix: previously leaked */
        free(permute_idx);
}
/*
* Complex version of all functions
*/
/*
 * Complex analogue of add_and_permute:
 * out = 4*v + v.transpose(1,2,0) + v.transpose(2,0,1)
 *     - 2*v.transpose(2,1,0) - 2*v.transpose(0,2,1) - 2*v.transpose(1,0,2)
 * after v += w.  All buffers hold n^3 double complex values.
 */
static void zadd_and_permute(double complex *out, double complex *w,
                             double complex *v, int n)
{
        const int nn = n * n;
        const int nnn = nn * n;
        for (int m = 0; m < nnn; m++) {
                v[m] += w[m];
        }
        for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
        for (int k = 0; k < n; k++) {
                const int ijk = i*nn + j*n + k;
                out[ijk] = v[ijk] * 4
                         + v[j*nn+k*n+i]
                         + v[k*nn+i*n+j]
                         - v[k*nn+j*n+i] * 2
                         - v[i*nn+k*n+j] * 2
                         - v[j*nn+i*n+k] * 2;
        } } }
}
/*
 * Complex analogue of get_wv: the same two contractions
 *   w += einsum('if,fjk->ijk', vv_op[:,nocc:], t2T[c])
 *      - einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
 * and the t1/fov outer-product term for v, on double complex data via
 * zgemm.  Results are scattered through idx; cache holds nocc^3 scratch.
 */
static void zget_wv(double complex *w, double complex *v,
double complex *cache, double complex *fvohalf,
double complex *vooo, double complex *vv_op,
double complex *t1Thalf, double complex *t2T,
int nocc, int nvir, int a, int b, int c, int *idx)
{
const double complex D0 = 0;
const double complex D1 = 1;
const double complex DN1 =-1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double complex *pt2T;
zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
/* second term subtracted via DN1 with beta=1 (accumulates into cache) */
zgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T + b * nvoo + a * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
/*
 * Complex-amplitude energy of one (a,b,c) triple:
 *   et = Re[ fac * sum_ijk w[ijk] * conj(v[ijk]) / D_ijk ]
 * with D_ijk = e_i + e_j + e_k - e_a - e_b - e_c.  The imaginary part is
 * discarded by the complex-to-double accumulation into et.
 */
double _ccsd_t_zget_energy(double complex *w, double complex *v,
                           double *mo_energy, int nocc,
                           int a, int b, int c, double fac)
{
        const double e_abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
        double et = 0;
        int n = 0;
        for (int i = 0; i < nocc; i++) {
                for (int j = 0; j < nocc; j++) {
                        for (int k = 0; k < nocc; k++, n++) {
                                et += fac / (mo_energy[i] + mo_energy[j] + mo_energy[k] - e_abc) * w[n] * conj(v[n]);
                        }
                }
        }
        return et;
}
/*
 * Complex analogue of contract6: accumulate the six index permutations of
 * the w/v cubes with zget_wv, symmetrize with zadd_and_permute, and return
 * the energy contribution with the proper degeneracy factor.  cache1 must
 * provide at least 3*nocc^3 double complex values of scratch.
 */
static double complex
zcontract6(int nocc, int nvir, int a, int b, int c,
           double *mo_energy, double complex *t1T, double complex *t2T,
           int nirrep, int *o_ir_loc, int *v_ir_loc,
           int *oo_ir_loc, int *orbsym, double complex *fvo,
           double complex *vooo, double complex *cache1, void **cache,
           int *permute_idx)
{
        const int nooo = nocc * nocc * nocc;
        int *idx[6];
        int i;
        for (i = 0; i < 6; i++) {
                idx[i] = permute_idx + i * nooo;
        }
        double complex *v0 = cache1;
        double complex *w0 = v0 + nooo;
        double complex *z0 = w0 + nooo;
        double complex *wtmp = z0;  /* z0 region doubles as gemm scratch */
        for (i = 0; i < nooo; i++) {
                w0[i] = 0;
                v0[i] = 0;
        }
        zget_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx[0]);
        zget_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx[1]);
        zget_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx[2]);
        zget_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx[3]);
        zget_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx[4]);
        zget_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx[5]);
        zadd_and_permute(z0, w0, v0, nocc);
        /* degenerate triples are over-counted by the permutation sum */
        double fac = 1.;
        if (a == c) {
                fac = 1./6;
        } else if (a == b || b == c) {
                fac = .5;
        }
        return _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, fac);
}
/*
 * Complex-amplitude driver: sum the (T) energy over all (a,b,c) jobs of
 * the [a0:a1, b0:b1] slice into *e_tot.  Per-thread halved t1T/fvo copies
 * and scratch mirror CCsd_t_contract.
 *
 * Fix: the job list allocated here was never released — free(jobs) added
 * (memory leak on every call).
 */
void CCsd_t_zcontract(double complex *e_tot,
                      double *mo_energy, double complex *t1T, double complex *t2T,
                      double complex *vooo, double complex *fvo,
                      int nocc, int nvir, int a0, int a1, int b0, int b1,
                      int nirrep, int *o_ir_loc, int *v_ir_loc,
                      int *oo_ir_loc, int *orbsym,
                      void *cache_row_a, void *cache_col_a,
                      void *cache_row_b, void *cache_col_b)
{
        int da = a1 - a0;
        int db = b1 - b0;
        CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
        size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                        cache_row_a, cache_col_a,
                                        cache_row_b, cache_col_b,
                                        sizeof(double complex));
        int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
        _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
        int a, b, c;
        size_t k;
        double complex *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
        double complex *t1Thalf = malloc(sizeof(double complex) * nvir*nocc * 2);
        double complex *fvohalf = t1Thalf + nvir*nocc;
        for (k = 0; k < nvir*nocc; k++) {
                t1Thalf[k] = t1T[k] * .5;
                fvohalf[k] = fvo[k] * .5;
        }
        double complex e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
                a = jobs[k].a;
                b = jobs[k].b;
                c = jobs[k].c;
                e += zcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
                                nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                                fvohalf, vooo, cache1, jobs[k].cache, permute_idx);
        }
        free(t1Thalf);
        free(cache1);
#pragma omp critical
        *e_tot += e;
}
        free(jobs);      /* fix: previously leaked */
        free(permute_idx);
}
/*****************************************************************************
*
* mpi4pyscf
*
*****************************************************************************/
/*
 * get_wv for the mpi4pyscf driver: the t2 amplitudes arrive as per-process
 * slices — t2T_c sliced along its leading virtual axis with offset c0, and
 * t2T_a with offset a0 — and the vooo block is offset by a0.  Otherwise
 * the contraction and the t1/fov scatter are identical to get_wv.
 */
static void MPICCget_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1Thalf,
double *t2T_a, double *t2T_c,
int nocc, int nvir, int a, int b, int c,
int a0, int b0, int c0, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 = -1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2T;
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T_c+(c-c0)*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T_c+(c-c0)*nvoo+b*noo, &nocc, vooo+(a-a0)*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T_a + (a-a0) * nvoo + b * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
/*
 * contract6 for the mpi4pyscf driver: the vv_op, vooo and t2T inputs are
 * not per-job cache pointers but are located inside the sliced data_ptrs
 * arrays using the slice bounds in `slices` (a0..c1).  The six permuted
 * contributions are accumulated with MPICCget_wv and the energy is
 * returned with the usual degeneracy factor.  cache1 must provide
 * 3*nocc^3 doubles of scratch.
 */
static double MPICCcontract6(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double *t1T, double *fvo,
int *slices, double **data_ptrs, double *cache1,
int *permute_idx)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
const int da = a1 - a0;
const int db = b1 - b0;
const int dc = c1 - c0;
const int nooo = nocc * nocc * nocc;
const int nmo = nocc + nvir;
const size_t nop = nocc * nmo;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
/* vv_op slices for the six orderings of (a,b,c) within this block */
double *vvop_ab = data_ptrs[0] + ((a-a0)*db+b-b0) * nop;
double *vvop_ac = data_ptrs[1] + ((a-a0)*dc+c-c0) * nop;
double *vvop_ba = data_ptrs[2] + ((b-b0)*da+a-a0) * nop;
double *vvop_bc = data_ptrs[3] + ((b-b0)*dc+c-c0) * nop;
double *vvop_ca = data_ptrs[4] + ((c-c0)*da+a-a0) * nop;
double *vvop_cb = data_ptrs[5] + ((c-c0)*db+b-b0) * nop;
double *vooo_a = data_ptrs[6];
double *vooo_b = data_ptrs[7];
double *vooo_c = data_ptrs[8];
double *t2T_a = data_ptrs[9 ];
double *t2T_b = data_ptrs[10];
double *t2T_c = data_ptrs[11];
double *v0 = cache1;
double *w0 = v0 + nooo;
double *z0 = w0 + nooo;
double *wtmp = z0;  /* z0 region doubles as gemm scratch */
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
MPICCget_wv(w0, v0, wtmp, fvo, vooo_a, vvop_ab, t1T, t2T_a, t2T_c, nocc, nvir, a, b, c, a0, b0, c0, idx0);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_a, vvop_ac, t1T, t2T_a, t2T_b, nocc, nvir, a, c, b, a0, c0, b0, idx1);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_b, vvop_ba, t1T, t2T_b, t2T_c, nocc, nvir, b, a, c, b0, a0, c0, idx2);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_b, vvop_bc, t1T, t2T_b, t2T_a, nocc, nvir, b, c, a, b0, c0, a0, idx3);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_c, vvop_ca, t1T, t2T_c, t2T_b, nocc, nvir, c, a, b, c0, a0, b0, idx4);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_c, vvop_cb, t1T, t2T_c, t2T_a, nocc, nvir, c, b, a, c0, b0, a0, idx5);
add_and_permute(z0, w0, v0, nocc);
double et;
/* degenerate triples are over-counted by the permutation sum */
if (a == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
/*
 * Enumerate the unique triples a >= b >= c that fall inside the slice
 * ranges given in `slices` and record them in jobs.  The cache pointers of
 * each job are left untouched (MPICCcontract6 locates data via data_ptrs).
 * Returns the number of jobs written.
 */
size_t _MPICCsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
                           int *slices, double **data_ptrs)
{
        const int a0 = slices[0], a1 = slices[1];
        const int b0 = slices[2], b1 = slices[3];
        const int c0 = slices[4], c1 = slices[5];
        size_t m = 0;
        size_t a, b, c;
        for (a = a0; a < a1; a++) {
                for (b = b0; b < MIN(b1, a+1); b++) {
                        for (c = c0; c < MIN(c1, b+1); c++) {
                                jobs[m].a = a;
                                jobs[m].b = b;
                                jobs[m].c = c;
                                m++;
                        }
                }
        }
        return m;
}
/*
 * mpi4pyscf (T) driver: build the job list for this process's slice and
 * accumulate the summed energy into *e_tot, with per-thread halved
 * t1T/fvo copies and scratch as in CCsd_t_contract.
 *
 * Fix: the job list allocated here was never released — free(jobs) added
 * (memory leak on every call).
 */
void MPICCsd_t_contract(double *e_tot, double *mo_energy, double *t1T,
                        double *fvo, int nocc, int nvir,
                        int *slices, double **data_ptrs)
{
        const int a0 = slices[0];
        const int a1 = slices[1];
        const int b0 = slices[2];
        const int b1 = slices[3];
        const int c0 = slices[4];
        const int c1 = slices[5];
        int da = a1 - a0;
        int db = b1 - b0;
        int dc = c1 - c0;
        CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*dc);
        size_t njobs = _MPICCsd_t_gen_jobs(jobs, nocc, nvir, slices, data_ptrs);
        int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
        _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, fvo, jobs, e_tot, slices, \
               data_ptrs, permute_idx)
{
        int a, b, c;
        size_t k;
        double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
        double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
        double *fvohalf = t1Thalf + nvir*nocc;
        for (k = 0; k < nvir*nocc; k++) {
                t1Thalf[k] = t1T[k] * .5;
                fvohalf[k] = fvo[k] * .5;
        }
        double e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
                a = jobs[k].a;
                b = jobs[k].b;
                c = jobs[k].c;
                e += MPICCcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf,
                                    fvohalf, slices, data_ptrs, cache1,
                                    permute_idx);
        }
        free(t1Thalf);
        free(cache1);
#pragma omp critical
        *e_tot += e;
}
        free(jobs);      /* fix: previously leaked */
        free(permute_idx);
}
/*****************************************************************************
*
* pyscf periodic ccsd(t) with k-points
*
*****************************************************************************/
/*
 * Enumerate every (a,b,c) triple of the full rectangular slice (no
 * permutational restriction, unlike _MPICCsd_t_gen_jobs) and record it in
 * jobs.  Cache pointers are not filled.  Returns the job count.
 */
size_t _CCsd_t_gen_jobs_full(CacheJob *jobs, int nocc, int nvir,
                             int *slices)
{
        const int a0 = slices[0], a1 = slices[1];
        const int b0 = slices[2], b1 = slices[3];
        const int c0 = slices[4], c1 = slices[5];
        size_t m = 0;
        size_t a, b, c;
        for (a = a0; a < a1; a++) {
                for (b = b0; b < b1; b++) {
                        for (c = c0; c < c1; c++) {
                                jobs[m].a = a;
                                jobs[m].b = b;
                                jobs[m].c = c;
                                m++;
                        }
                }
        }
        return m;
}
/*
 * k-point variant of zget_wv: the three t2 roles use separate sliced
 * tensors (t2T_c1 for the ov contraction, t2T_c2 for the vooo
 * contraction, t2T_c3 for the outer-product term) with axis offsets
 * c0 / a0 / b0, and a second vv_op buffer (vv_op2, transposed ij index
 * order) supplies the oo block of the v term.  bool_add_v gates the v
 * accumulation — callers pass the k-point conservation condition
 * (e.g. kk==kc) so v is only built for momentum-conserving permutations.
 */
static void CCzget_wv(double complex *w, double complex *v, double complex *cache,
double complex *fvohalf, double complex *vooo,
double complex *vv_op, double complex *vv_op2,
double complex *t1Thalf, double complex *t2T_c1,
double complex *t2T_c2, double complex *t2T_c3,
int nocc, int nvir, int a, int b, int c,
int a0, int b0, int c0, int *idx, int bool_add_v)
{
const double complex D0 = 0;
const double complex D1 = 1;
const double complex DN1 = -1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double complex *pt2T;
zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T_c1+(c-c0)*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
zgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T_c2+(c-c0)*nvoo+b*noo, &nocc, vooo+(a-a0)*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T_c3 + (b-b0)*nvoo + a*noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
if(bool_add_v == 1){
v[idx[n]] += (vv_op2[j*nmo+i] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
}
} } }
}
/*
 * Assemble the w and v cubes for triple (a,b,c) at fixed k-points
 * (mo_offset selects the i,j,k,a,b,c k-points) and store them into the
 * global t3Tw/t3Tv tensors at this slice's linear offset.  No energy
 * denominator is applied here — see the commented-out `div` below; the
 * denominator is presumably applied by the caller (TODO confirm).
 * cache1 must provide 3*nocc^3 double complex values of scratch.
 */
static void zcontract6_t3T(int nocc, int nvir, int a, int b, int c,
int *mo_offset, double complex *t3Tw,
double complex *t3Tv, double *mo_energy,
double complex *t1T, double complex *fvo, int *slices,
double complex **data_ptrs, double complex *cache1,
int *permute_idx)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
const int da = a1 - a0;
const int db = b1 - b0;
const int dc = c1 - c0;
const int nooo = nocc * nocc * nocc;
const int nmo = nocc + nvir;
const int nop = nocc * nmo;
const int nov = nocc * nvir;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
/* k-point indices of the three occupied and three virtual orbitals */
int ki = mo_offset[0];
int kj = mo_offset[1];
int kk = mo_offset[2];
int ka = mo_offset[3];
int kb = mo_offset[4];
int kc = mo_offset[5];
double complex *t1T_a = t1T + ka * nov;
double complex *t1T_b = t1T + kb * nov;
double complex *t1T_c = t1T + kc * nov;
double complex *fvo_a = fvo + ka * nov;
double complex *fvo_b = fvo + kb * nov;
double complex *fvo_c = fvo + kc * nov;
double complex *vvop_ab = data_ptrs[0] + ((a-a0)*db+b-b0) * nop;
double complex *vvop_ac = data_ptrs[1] + ((a-a0)*dc+c-c0) * nop;
double complex *vvop_ba = data_ptrs[2] + ((b-b0)*da+a-a0) * nop;
double complex *vvop_bc = data_ptrs[3] + ((b-b0)*dc+c-c0) * nop;
double complex *vvop_ca = data_ptrs[4] + ((c-c0)*da+a-a0) * nop;
double complex *vvop_cb = data_ptrs[5] + ((c-c0)*db+b-b0) * nop;
double complex *vooo_aj = data_ptrs[6];
double complex *vooo_ak = data_ptrs[7];
double complex *vooo_bi = data_ptrs[8];
double complex *vooo_bk = data_ptrs[9];
double complex *vooo_ci = data_ptrs[10];
double complex *vooo_cj = data_ptrs[11];
double complex *t2T_cj = data_ptrs[12];
double complex *t2T_cb = data_ptrs[13];
double complex *t2T_bk = data_ptrs[14];
double complex *t2T_bc = data_ptrs[15];
double complex *t2T_ci = data_ptrs[16];
double complex *t2T_ca = data_ptrs[17];
double complex *t2T_ak = data_ptrs[18];
double complex *t2T_ac = data_ptrs[19];
double complex *t2T_bi = data_ptrs[20];
double complex *t2T_ba = data_ptrs[21];
double complex *t2T_aj = data_ptrs[22];
double complex *t2T_ab = data_ptrs[23];
/* NOTE(review): abc and div are currently unused (the denominator line
 * below is commented out) — kept for the disabled in-place division */
double abc = mo_energy[nocc+a+ka*nmo] + mo_energy[nocc+b+kb*nmo] + mo_energy[nocc+c+kc*nmo];
double div;
double complex *v0 = cache1;
double complex *w0 = v0 + nooo;
double complex *z0 = w0 + nooo;
double complex *wtmp = z0;
int i, j, k, n;
int offset;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
/*
* t2T = t2.transpose(2,3,1,0)
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
* v+= numpy.einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
* v+= w
*/
CCzget_wv(w0, v0, wtmp, fvo_c, vooo_aj, vvop_ab, vvop_ba, t1T_c, t2T_cj, t2T_cb, t2T_ba,
nocc, nvir, a, b, c, a0, b0, c0, idx0, (kk==kc));
CCzget_wv(w0, v0, wtmp, fvo_b, vooo_ak, vvop_ac, vvop_ca, t1T_b, t2T_bk, t2T_bc, t2T_ca,
nocc, nvir, a, c, b, a0, c0, b0, idx1, (kj==kb));
CCzget_wv(w0, v0, wtmp, fvo_c, vooo_bi, vvop_ba, vvop_ab, t1T_c, t2T_ci, t2T_ca, t2T_ab,
nocc, nvir, b, a, c, b0, a0, c0, idx2, (kk==kc));
CCzget_wv(w0, v0, wtmp, fvo_a, vooo_bk, vvop_bc, vvop_cb, t1T_a, t2T_ak, t2T_ac, t2T_cb,
nocc, nvir, b, c, a, b0, c0, a0, idx3, (ka==ki));
CCzget_wv(w0, v0, wtmp, fvo_b, vooo_ci, vvop_ca, vvop_ac, t1T_b, t2T_bi, t2T_ba, t2T_ac,
nocc, nvir, c, a, b, c0, a0, b0, idx4, (kb==kj));
CCzget_wv(w0, v0, wtmp, fvo_a, vooo_cj, vvop_cb, vvop_bc, t1T_a, t2T_aj, t2T_ab, t2T_bc,
nocc, nvir, c, b, a, c0, b0, a0, idx5, (ka==ki));
/* linear offset of this (a,b,c) cube inside the slice's t3T tensors */
offset = (((a-a0)*db + b-b0)*dc + c-c0)*nooo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
//div = 1. / (mo_energy[i+ki*nmo] + mo_energy[j+kj*nmo] + mo_energy[k+kk*nmo] - abc);
t3Tw[offset + n] = w0[n];
t3Tv[offset + n] = v0[n];
} } }
}
/*
 * Top-level driver: builds the job list for the requested (a,b,c) virtual-index
 * slices, then runs zcontract6_t3T over the jobs in parallel. Each thread
 * pre-scales its own copy of t1T and fvo by 1/2 once, up front.
 */
void CCsd_zcontract_t3T(double complex *t3Tw, double complex *t3Tv, double *mo_energy,
                        double complex *t1T, double complex *fvo, int nocc, int nvir, int nkpts,
                        int *mo_offset, int *slices, double complex **data_ptrs)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    const int da = a1 - a0;
    const int db = b1 - b0;
    const int dc = c1 - c0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*dc);
    size_t njobs = _CCsd_t_gen_jobs_full(jobs, nocc, nvir, slices);
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, nkpts, t3Tw, t3Tv, mo_offset, mo_energy, t1T, fvo, jobs, slices, \
               data_ptrs, permute_idx)
    {
        size_t m;
        /* per-thread scratch: 3 nocc^3 work arrays (+2 slack) */
        double complex *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
        double complex *t1Thalf = malloc(sizeof(double complex) * nkpts*nvir*nocc*2);
        double complex *fvohalf = t1Thalf + nkpts*nvir*nocc;
        for (m = 0; m < nkpts*nvir*nocc; m++) {
            t1Thalf[m] = t1T[m] * .5;
            fvohalf[m] = fvo[m] * .5;
        }
#pragma omp for schedule (dynamic, 4)
        for (m = 0; m < njobs; m++) {
            zcontract6_t3T(nocc, nvir, jobs[m].a, jobs[m].b, jobs[m].c, mo_offset,
                           t3Tw, t3Tv, mo_energy, t1Thalf, fvohalf,
                           slices, data_ptrs, cache1, permute_idx);
        }
        free(t1Thalf);
        free(cache1);
    }
    free(jobs);
    free(permute_idx);
}
|
hypergeometric_distribution.c | #include <stdio.h>
#include <stdlib.h>
#include <gmp.h>
#define TOLERANCE 0.000000001
#define MALICIOUS_RATE 0.9
/* res := n!  (n <= 0 yields 1).
 * res must already be mpf_init'ed by the caller (all call sites do so).
 * Computes in place, removing the previous version's temporary mpf_t,
 * which cost an extra allocation, copy and free per call. */
void factorial(mpf_t res, int n)
{
    int i;
    mpf_set_ui(res, 1);
    for (i = 1; i <= n; ++i) {
        mpf_mul_ui(res, res, i);
    }
}
// Combination: comb := C(n, r) = factorial(n) / ( factorial(r) * factorial(n-r) )
// comb must already be mpf_init'ed by the caller.
void combination(mpf_t comb, int n, int r) {
    mpf_t fact_n;
    mpf_init(fact_n);
    mpf_t fact_r;
    mpf_init(fact_r);
    mpf_t fact_n_minus_r;
    mpf_init(fact_n_minus_r);
    factorial(fact_n, n);
    factorial(fact_r, r);
    factorial(fact_n_minus_r, n - r);
    /* fact_r <- r! * (n-r)!, then divide n! by it.
     * (The old fact_r_mul_fact_r_minus_r variable was dead code: it was
     * init'ed and cleared but never used — removed.) */
    mpf_mul(fact_r, fact_r, fact_n_minus_r);
    mpf_div(comb, fact_n, fact_r);
    mpf_clear(fact_n);
    mpf_clear(fact_r);
    mpf_clear(fact_n_minus_r);
}
/* For each committee size n (searched in parallel), accumulate the
 * hypergeometric probability sum C(good,k)*C(bad,n-k)/C(total,n) over k and
 * print the first n for which 1 - sum < TOLERANCE, then cancel all threads.
 * Fixes vs. previous version:
 *  - mpf_t leaks: the three per-term comb variables, sum/sum_minus_1 (leaked
 *    every outer iteration) and tolerance were never mpf_clear'ed;
 *  - "#pragma omp cancel for" was nested inside the critical region, which is
 *    non-conforming OpenMP (cancel must be closely nested in the for region);
 *    the cancellation request is now issued after leaving the critical section
 *    and after per-iteration cleanup, so no cleanup is skipped. */
void hypergeometric_distribution(int nb_nodes) {
    int nb_malicious = nb_nodes * MALICIOUS_RATE;
    int nb_good = nb_nodes - nb_malicious;
    int n = 1;
    mpf_t tolerance;
    mpf_init_set_d(tolerance, TOLERANCE);
    int abort = 0;
#pragma omp parallel for schedule(dynamic) shared(abort)
    for (n = 1; n <= nb_nodes; n++) {
#pragma omp flush(abort)
        if (abort == 0) {
            int cancel_now = 0;   /* set when this thread found the answer */
            mpf_t sum;
            mpf_t sum_minus_1;
            mpf_init(sum);
            mpf_init(sum_minus_1);
            for (int k = 1; k <= nb_good && cancel_now == 0; k++) {
                if (abort == 0) {
                    if (n - k >= 0 && nb_good - k >= 0 && nb_malicious >= n - k) {
                        mpf_t comb_nb_good_with_k;
                        mpf_t comb_nb_malicious_with_n_minus_k;
                        mpf_t comb_nb_nodes_with_n;
                        mpf_init(comb_nb_good_with_k);
                        mpf_init(comb_nb_malicious_with_n_minus_k);
                        mpf_init(comb_nb_nodes_with_n);
                        // combination(nb_good, k) * combination(nb_malicious, n - k ) / combination(nb_nodes, n)
                        combination(comb_nb_good_with_k, nb_good, k);
                        combination(comb_nb_malicious_with_n_minus_k, nb_malicious, n - k);
                        mpf_mul(comb_nb_good_with_k, comb_nb_good_with_k, comb_nb_malicious_with_n_minus_k);
                        combination(comb_nb_nodes_with_n, nb_nodes, n);
                        mpf_div(comb_nb_good_with_k, comb_nb_good_with_k, comb_nb_nodes_with_n);
                        mpf_add(sum, sum, comb_nb_good_with_k);
                        mpf_ui_sub(sum_minus_1, 1, sum);
                        /* BUGFIX: these three leaked on every term */
                        mpf_clear(comb_nb_good_with_k);
                        mpf_clear(comb_nb_malicious_with_n_minus_k);
                        mpf_clear(comb_nb_nodes_with_n);
                        if (mpf_cmp(sum_minus_1, tolerance) == -1) {
#pragma omp critical
                            {
                                if (abort == 0) {
                                    printf("%d\r\n", n);
#pragma omp atomic write
                                    abort = 1;
                                }
                            }
                            cancel_now = 1;  /* exits the k loop */
                        }
                    }
                }
            }
            /* BUGFIX: sum/sum_minus_1 leaked once per outer iteration */
            mpf_clear(sum);
            mpf_clear(sum_minus_1);
            if (cancel_now) {
#pragma omp cancel for
            }
#pragma omp cancellation point for
        }
    }
    /* BUGFIX: tolerance was never cleared */
    mpf_clear(tolerance);
}
/* Usage: prog <nb_nodes> — silently does nothing on wrong argument count. */
int main(int argc, char *argv[]) {
    if (argc != 2) {
        return 0;
    }
    hypergeometric_distribution(atoi(argv[1]));
    return 0;
}
sxc_fmt_plug.c | /* SXC cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sxc);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "sha.h"
#include <openssl/blowfish.h>
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "sxc"
#define FORMAT_NAME "StarOffice .sxc"
#define FORMAT_TAG "$sxc$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " Blowfish"
#else
#define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdc
cb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b
553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
/* Parsed form of one "$sxc$*..." ciphertext (filled by get_salt(),
 * consumed by crypt_all() via cur_salt). */
static struct custom_salt {
	int cipher_type; // FIXME: cipher_type seems to be ignored
	int checksum_type;
	int iterations;      /* PBKDF2-HMAC-SHA1 iteration count */
	int key_size;        /* derived key length in bytes (16 or 32, see valid()) */
	int iv_length;       /* bytes used in iv[] */
	int salt_length;     /* bytes used in salt[] */
	int original_length; /* plaintext byte count hashed for the checksum */
	int length;          /* encrypted content length in bytes */
	unsigned char iv[16];
	unsigned char salt[32];
	unsigned char content[1024];
} *cur_salt;
/* One-time format setup: scale key counts for OpenMP and allocate the
 * per-candidate key and result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	/* note the order: min scales by thread count only,
	 * max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate one "$sxc$*" ciphertext line field by field:
 * cipher*checksum*iterations*keysize*digest*ivlen*iv*saltlen*salt*origlen*len*content
 * Returns 1 only when every '*'-separated field parses and is in range. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;	/* original strdup pointer, kept so it can be freed */
	char *p;
	int res, extra;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);	/* strtokm mutates its input; work on a copy */
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	res = atoi(p);
	if (res <= 0)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 16)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* original length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)	/* 1024 because of "unsigned char output[1024];" in crypt_all */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (strtokm(NULL, "*") != NULL)	/* the end */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse an already-validated ciphertext into a custom_salt.
 * Returns a pointer to static storage — overwritten on every call. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	strtokm(NULL, "*");
	/* skip checksum field (it is consumed separately by get_binary()) */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the IV */
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the salt */
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.original_length = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the encrypted content */
	for (i = 0; i < cs.length; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += 6; /* skip over "$sxc$*" */
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Hash all queued candidates against cur_salt:
 *   SHA1(password) -> PBKDF2-HMAC-SHA1 (key_size bytes) -> Blowfish-CFB64
 *   decrypt of the stored content -> SHA1 over the first original_length
 *   bytes, stored in crypt_out[] for the cmp_* callbacks.
 * Parallelized over candidates in groups of MAX_KEYS_PER_CRYPT. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[MAX_KEYS_PER_CRYPT][32];
		unsigned char hash[MAX_KEYS_PER_CRYPT][32];
		BF_KEY bf_key;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];
		int i;
		SHA_CTX ctx;
		/* step 1: SHA-1 of each candidate password */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
			SHA1_Final((unsigned char *)hash[i], &ctx);
		}
		/* step 2: PBKDF2 of that digest (SIMD path when available) */
#ifdef SIMD_COEF_32
		{
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 20;	/* SHA-1 digest length */
				pin[i] = (unsigned char*)hash[i];
				pout[i] = key[i];
			}
			pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, pout,
				cur_salt->key_size, 0);
		}
#else
		pbkdf2_sha1(hash[0], 20, cur_salt->salt,
			cur_salt->salt_length,
			cur_salt->iterations, key[0],
			cur_salt->key_size, 0);
#endif
		/* step 3: decrypt the content and checksum the plaintext */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			bf_ivec_pos = 0;
			memcpy(ivec, cur_salt->iv, 8);
			BF_set_key(&bf_key, cur_salt->key_size, key[i]);
			BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, output, cur_salt->original_length);
			SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
		}
	}
	return count;
}
/* Fast reject across all computed hashes: compares only the first ARCH_SIZE
 * bytes of each digest; cmp_one() performs the full BINARY_SIZE comparison. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width comparison of one computed digest against the stored binary. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one() already compares the whole digest, so nothing further to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void sxc_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate password #index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Tunable-cost report: the salt's PBKDF2 iteration count. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/* John the Ripper format descriptor wiring the callbacks above together
 * for StarOffice .sxc documents. */
struct fmt_main fmt_sxc = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		sxc_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sxc_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
mediancut.c | /*
** © 2009-2018 by Kornel Lesiński.
** © 1989, 1991 by Jef Poskanzer.
** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider.
**
** See COPYRIGHT file for license.
*/
#include <stdlib.h>
#include <stddef.h>
#include "libimagequant.h"
#include "pam.h"
#include "mediancut.h"
#define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float))
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]);
/* One median-cut box: a run of consecutive histogram entries
 * achv[ind .. ind+colors-1] plus cached statistics about them. */
struct box {
    f_pixel color;      /* average color of the box (see averagepixels) */
    f_pixel variance;   /* weighted per-channel variance (see box_variance) */
    double sum, total_error, max_error; /* total_error < 0 means "not computed yet" */
    unsigned int ind;
    unsigned int colors;
};
ALWAYS_INLINE static double variance_diff(double val, const double good_enough);
/* Squared deviation, with deviations below the "good enough" threshold
 * damped to one quarter so that tiny differences contribute less. */
inline static double variance_diff(double val, const double good_enough)
{
    const double sq = val * val;
    const double threshold = good_enough * good_enough;
    return (sq < threshold) ? sq * 0.25 : sq;
}
/** Weighted per-channel variance of the box. It's used to decide which channel to split by */
static f_pixel box_variance(const hist_item achv[], const struct box *box)
{
f_pixel mean = box->color;
double variancea=0, variancer=0, varianceg=0, varianceb=0;
for(unsigned int i = 0; i < box->colors; ++i) {
const f_pixel px = achv[box->ind + i].acolor;
double weight = achv[box->ind + i].adjusted_weight;
variancea += variance_diff(mean.a - px.a, 2.0/256.0)*weight;
variancer += variance_diff(mean.r - px.r, 1.0/256.0)*weight;
varianceg += variance_diff(mean.g - px.g, 1.0/256.0)*weight;
varianceb += variance_diff(mean.b - px.b, 1.0/256.0)*weight;
}
return (f_pixel){
.a = variancea*(4.0/16.0),
.r = variancer*(7.0/16.0),
.g = varianceg*(9.0/16.0),
.b = varianceb*(5.0/16.0),
};
}
/* Largest single-color difference between the box average and any member. */
static double box_max_error(const hist_item achv[], const struct box *box)
{
    const f_pixel avg = box->color;
    double worst = 0;
    for (unsigned int i = 0; i < box->colors; ++i) {
        const double err = colordifference(avg, achv[box->ind + i].acolor);
        if (err > worst) {
            worst = err;
        }
    }
    return worst;
}
ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h);
/* Swap two histogram entries in place; no-op when both point at the same one. */
static inline void hist_item_swap(hist_item *l, hist_item *r)
{
    if (l == r) {
        return;
    }
    const hist_item tmp = *l;
    *l = *r;
    *r = tmp;
}
ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len);
/* Pick a pivot index: middle element for short ranges, otherwise the
 * median-of-three of sort_value at fixed index 8, the middle, and the end. */
inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len)
{
    if (len < 32) {
        return len/2;
    }
    const unsigned int aidx = 8, bidx = len/2, cidx = len-1;
    const unsigned int a = base[aidx].tmp.sort_value;
    const unsigned int b = base[bidx].tmp.sort_value;
    const unsigned int c = base[cidx].tmp.sort_value;
    if (a < b) {
        if (b < c) return bidx;          /* a < b < c */
        return (a < c) ? cidx : aidx;    /* b is largest */
    }
    if (b > c) return bidx;              /* c < b <= a */
    return (a < c) ? aidx : cidx;        /* b is smallest */
}
ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len);
/* Partition base[0..len-1] around a (median-of-three) pivot's sort_value.
 * Entries with keys >= pivot end up on the left, smaller keys on the right;
 * on return base[l] holds the pivot in its final position. */
inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len)
{
    unsigned int l = 1, r = len;
    if (len >= 8) {
        hist_item_swap(&base[0], &base[qsort_pivot(base,len)]);
    }
    const unsigned int pivot_value = base[0].tmp.sort_value;
    while (l < r) {
        if (base[l].tmp.sort_value >= pivot_value) {
            l++;
        } else {
            /* shrink from the right past entries already on the correct side */
            while(l < --r && base[r].tmp.sort_value <= pivot_value) {}
            hist_item_swap(&base[l], &base[r]);
        }
    }
    l--;
    hist_item_swap(&base[0], &base[l]); /* move pivot into its final slot */
    return l;
}
/** quick select algorithm: partially sorts so that base[sort_start] lands in
 * its final sorted position, recursing (iteratively) only into the side that
 * contains it. */
static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start)
{
    for(;;) {
        const unsigned int l = qsort_partition(base, len), r = l+1;
        if (l > 0 && sort_start < l) {
            len = l;  /* target lies in the left partition */
        }
        else if (r < len && sort_start > r) {
            base += r; len -= r; sort_start -= r;  /* target lies in the right partition */
        }
        else break;  /* target is at (or adjacent to) the pivot — done */
    }
}
/** sorts array to make sum of weights lower than halfvar one side, returns edge between <halfvar and >halfvar parts of the set.
 * *lowervar accumulates the running sum of color_weight; the function only
 * fully sorts as much of the array as needed to locate the crossing point. */
static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar)
{
    do {
        const unsigned int l = qsort_partition(base, len), r = l+1;
        // check if sum of left side is smaller than half,
        // if it is, then it doesn't need to be sorted
        unsigned int t = 0; double tmpsum = *lowervar;
        while (t <= l && tmpsum < halfvar) tmpsum += base[t++].color_weight;
        if (tmpsum < halfvar) {
            *lowervar = tmpsum;
        } else {
            if (l > 0) {
                // crossing point is inside the left partition — recurse into it
                hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar);
                if (res) return res;
            } else {
                // End of left recursion. This will be executed in order from the first element.
                *lowervar += base[0].color_weight;
                if (*lowervar > halfvar) return &base[0];
            }
        }
        if (len > r) {
            base += r; len -= r; // tail-recursive "call"
        } else {
            // NOTE(review): when r == len this reads base[len] — verify the
            // partition invariants guarantee r < len on this path.
            *lowervar += base[r].color_weight;
            return (*lowervar > halfvar) ? &base[r] : NULL;
        }
    } while(1);
}
static f_pixel get_median(const struct box *b, hist_item achv[]);
/* Channel index paired with its variance, for sorting channels. */
typedef struct {
    unsigned int chan; float variance;
} channelvariance;
/* qsort comparator: descending by variance (ties compare equal). */
static int comparevariance(const void *ch1, const void *ch2)
{
    const float va = ((const channelvariance *)ch1)->variance;
    const float vb = ((const channelvariance *)ch2)->variance;
    if (va > vb) return -1;
    if (va < vb) return 1;
    return 0;
}
/** Finds which channels need to be sorted first and preprocesses achv for fast sort.
 * Fills each entry's tmp.sort_value key and color_weight, and returns half of
 * the box's total color_weight (the split target used by the caller). */
static double prepare_sort(struct box *b, hist_item achv[])
{
    /*
    ** Sort dimensions by their variance, and then sort colors first by dimension with highest variance
    */
    channelvariance channels[4] = {
        {index_of_channel(a), b->variance.a},
        {index_of_channel(r), b->variance.r},
        {index_of_channel(g), b->variance.g},
        {index_of_channel(b), b->variance.b},
    };
    qsort(channels, 4, sizeof(channels[0]), comparevariance);
    const unsigned int ind1 = b->ind;
    const unsigned int colors = b->colors;
    /* NOTE(review): two pragma variants — presumably GCC 9 changed how
     * default(none) treats these const locals; confirm before unifying. */
#if __GNUC__ >= 9
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels, colors, ind1)
#else
    #pragma omp parallel for if (colors > 25000) \
        schedule(static) default(none) shared(achv, channels)
#endif
    for(unsigned int i=0; i < colors; i++) {
        const float *chans = (const float *)&achv[ind1 + i].acolor;
        // Only the first channel really matters. When trying median cut many times
        // with different histogram weights, I don't want sort randomness to influence outcome.
        achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) |
                                        (unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0);
    }
    const f_pixel median = get_median(b, achv);
    // box will be split to make color_weight of each side even
    const unsigned int ind = b->ind, end = ind+b->colors;
    double totalvar = 0;
    #pragma omp parallel for if (end - ind > 15000) \
        schedule(static) default(shared) reduction(+:totalvar)
    for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j]));
    return totalvar / 2.0;
}
/** finds median in unsorted set by sorting only minimum required (quickselect
 * via hist_item_sort_range); for an even count the two middle entries are
 * averaged. */
static f_pixel get_median(const struct box *b, hist_item achv[])
{
    const unsigned int median_start = (b->colors-1)/2;
    hist_item_sort_range(&(achv[b->ind]), b->colors,
                         median_start);
    if (b->colors&1) return achv[b->ind + median_start].acolor;
    // technically the second color is not guaranteed to be sorted correctly
    // but most of the time it is good enough to be useful
    return averagepixels(2, &achv[b->ind + median_start]);
}
/*
** Find the best splittable box. -1 if no boxes are splittable.
** Score = sum * max(channel variance), inflated for boxes whose worst
** color error exceeds max_mse.
*/
static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse)
{
    int best = -1;
    double best_score = 0;
    for (unsigned int i = 0; i < boxes; i++) {
        if (bv[i].colors < 2) {
            continue; /* a single color can't be split */
        }
        // looks only at max variance, because it's only going to split by it
        const double chan_var = MAX(bv[i].variance.r, MAX(bv[i].variance.g, bv[i].variance.b));
        double score = bv[i].sum * MAX(bv[i].variance.a, chan_var);
        if (bv[i].max_error > max_mse) {
            score = score * bv[i].max_error / max_mse;
        }
        if (score > best_score) {
            best_score = score;
            best = i;
        }
    }
    return best;
}
/* Weight of one histogram entry for the split: perceptual distance from the
 * median, scaled by a square-root-flattened adjusted_weight. */
inline static double color_weight(f_pixel median, hist_item h)
{
    float diff = colordifference(median, h.acolor);
    return sqrt(diff) * (sqrt(1.0+h.adjusted_weight)-1.0);
}
static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv);
static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes);
static double box_error(const struct box *box, const hist_item achv[])
{
f_pixel avg = box->color;
double total_error=0;
for (unsigned int i = 0; i < box->colors; ++i) {
total_error += colordifference(avg, achv[box->ind + i].acolor) * achv[box->ind + i].perceptual_weight;
}
return total_error;
}
/* True when the summed box errors are already below target_mse (converted to
 * absolute units). total_error is lazy (-1 = unknown): pass 1 sums only the
 * known errors for a cheap early exit; pass 2 computes and caches the rest. */
static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist)
{
    target_mse *= hist->total_perceptual_weight;
    double total_error=0;
    for(unsigned int i=0; i < boxes; i++) {
        // error is (re)calculated lazily
        if (bv[i].total_error >= 0) {
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }
    for(unsigned int i=0; i < boxes; i++) {
        if (bv[i].total_error < 0) {
            bv[i].total_error = box_error(&bv[i], hist->achv);
            total_error += bv[i].total_error;
        }
        if (total_error > target_mse) return false;
    }
    return true;
}
/* Initialize a box over achv[ind .. ind+colors-1] and cache its statistics.
 * total_error = -1 marks it as not yet computed (see
 * total_box_error_below_target). */
static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum) {
    box->ind = ind;
    box->colors = colors;
    box->sum = sum;
    box->total_error = -1;
    /* color must be set before variance/max_error, which read box->color */
    box->color = averagepixels(colors, &achv[ind]);
    box->variance = box_variance(achv, box);
    box->max_error = box_max_error(achv, box);
}
/*
** Here is the fun part, the median-cut colormap generator. This is based
** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer
** Display," SIGGRAPH 1982 Proceedings, page 297.
**
** Splits the histogram into at most newcolors boxes, stopping early when the
** total error drops below target_mse, and returns the resulting colormap
** (allocated with the supplied malloc/free pair).
*/
LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*))
{
    hist_item *achv = hist->achv;
    LIQ_ARRAY(struct box, bv, newcolors);
    unsigned int boxes = 1;
    /*
    ** Set up the initial box covering the whole histogram.
    */
    {
        double sum = 0;
        for(unsigned int i=0; i < hist->size; i++) {
            sum += achv[i].adjusted_weight;
        }
        box_init(&bv[0], achv, 0, hist->size, sum);
        /*
        ** Main loop: split boxes until we have enough.
        */
        while (boxes < newcolors) {
            // first splits boxes that exceed quality limit (to have colors for things like odd green pixel),
            // later raises the limit to allow large smooth areas/gradients get colors.
            const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse;
            const int bi = best_splittable_box(bv, boxes, current_max_mse);
            if (bi < 0) {
                break; /* ran out of colors! */
            }
            unsigned int indx = bv[bi].ind;
            unsigned int clrs = bv[bi].colors;
            /*
            Classic implementation tries to get even number of colors or pixels in each subdivision.
            Here, instead of popularity I use (sqrt(popularity)*variance) metric.
            Each subdivision balances number of pixels (popular colors) and low variance -
            boxes can be large if they have similar colors. Later boxes with high variance
            will be more likely to be split.
            Median used as expected value gives much better results than mean.
            */
            const double halfvar = prepare_sort(&bv[bi], achv);
            double lowervar=0;
            // hist_item_sort_halfvar sorts and sums lowervar at the same time
            // returns item to break at …minus one, which does smell like an off-by-one error.
            hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar);
            unsigned int break_at = MIN(clrs-1, (unsigned int)(break_p - &achv[indx] + 1));
            /*
            ** Split the box: left part gets break_at colors, right the rest.
            */
            double sm = bv[bi].sum;
            double lowersum = 0;
            for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight;
            box_init(&bv[bi], achv, indx, break_at, lowersum);
            box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum);
            ++boxes;
            if (total_box_error_below_target(target_mse, bv, boxes, hist)) {
                break; /* reached requested quality early */
            }
        }
    }
    colormap *map = pam_colormap(boxes, malloc, free);
    set_colormap_from_boxes(map, bv, boxes, achv);
    adjust_histogram(achv, bv, boxes);
    return map;
}
/*
 * Choose a representative color for each box and copy it into the
 * palette. The representative is the box's precomputed average color
 * (Heckbert's method); popularity is the summed perceptual weight of
 * the box's histogram entries.
 */
static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv)
{
    for (unsigned int b = 0; b < boxes; ++b) {
        const struct box *const cur = &bv[b];

        map->palette[b].acolor = cur->color;

        /* Accumulate directly into the palette field (same rounding
           behavior as the original accumulation). */
        map->palette[b].popularity = 0;
        for (unsigned int k = cur->ind; k < cur->ind + cur->colors; k++) {
            map->palette[b].popularity += achv[k].perceptual_weight;
        }
    }
}
/* Record, for every histogram entry, the index of the box (and thus the
 * palette entry) it ended up in. NOTE(review): the previous comment said
 * this "increases histogram popularity by difference from the final
 * color"; this version only stores likely_colormap_index — presumably
 * the feedback-weight adjustment happens elsewhere (confirm at callers). */
static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes)
{
for(unsigned int bi = 0; bi < boxes; ++bi) {
for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) {
achv[i].tmp.likely_colormap_index = bi;
}
}
}
/*
 * Weighted average color of `clrs` histogram entries, using each entry's
 * adjusted_weight. If the total weight is zero the accumulators stay at
 * zero and black is returned. Parallelized only for large inputs.
 */
static f_pixel averagepixels(unsigned int clrs, const hist_item achv[])
{
    double r = 0, g = 0, b = 0, a = 0, sum = 0;

    #pragma omp parallel for if (clrs > 25000) \
        schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum)
    for (unsigned int k = 0; k < clrs; k++) {
        const double w = achv[k].adjusted_weight;
        const f_pixel pixel = achv[k].acolor;
        sum += w;
        r += pixel.r * w;
        g += pixel.g * w;
        b += pixel.b * w;
        a += pixel.a * w;
    }

    if (sum) {
        r /= sum;
        g /= sum;
        b /= sum;
        a /= sum;
    }

    assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a));
    return (f_pixel){.r=r, .g=g, .b=b, .a=a};
}
|
parallel_for_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for foo
void test_no_clause() {
// Verifies the bare directive is accepted on a for loop and rejected on any other statement.
int i;
#pragma omp parallel for
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}}
#pragma omp parallel for
++i;
}
void test_branch_protected_scope() {
// Verifies goto/return may not cross the OpenMP region boundary in either direction.
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel for
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
// Verifies unknown clause tokens produce the "extra tokens" warning and are skipped.
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for foo bar
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
// Verifies punctuation after the directive (';', ',') is diagnosed as extra tokens.
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
// Verifies parsing and semantic checks of the collapse() clause: malformed argument
// lists, non-constant/non-positive arguments, loop-count mismatches, and nesting rules.
int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
#pragma omp parallel for collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel for collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp parallel for collapse(2) firstprivate(i)
for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
void test_private() {
// Verifies parsing of the private() clause argument list, then valid uses.
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
// Verifies parsing of the lastprivate() clause argument list, then valid uses.
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
// Verifies parsing of the firstprivate() clause, and combining it with lastprivate.
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel for lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
// Verifies floating-point loop variables are rejected for the associated for loop.
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
trmm.pluto_ancc.seq_par.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
double A[N][N+20];
double B[N][N+20];
/* Fill B with a repeating 1..5 pattern; make A unit-diagonal with the
 * same pattern above the diagonal and -1 below it. */
void init_arrays()
{
    int row, col;
    for (row = 0; row < N; row++) {
        for (col = 0; col < N; col++) {
            B[row][col] = (row + col) % 5 + 1;
            if (row == col) {
                A[row][col] = 1;
            } else if (row < col) {
                A[row][col] = (row + col) % 5 + 1;
            } else {
                A[row][col] = -1;
            }
        }
    }
}
/* Dump the rounded contents of B to stderr, wrapping every 80 columns. */
void print_array()
{
    int row, col;
    for (row = 0; row < N; row++) {
        for (col = 0; col < N; col++) {
            fprintf(stderr, "%lf ", round(B[row][col]));
            if (col % 80 == 79) fprintf(stderr, "\n");
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
/*
 * Wall-clock time in seconds (microsecond resolution).
 * Fixes: the old version declared an unused `stat`, ignored the
 * gettimeofday() return value, and passed an uninitialized struct
 * timezone (the argument is obsolescent; NULL is the portable choice).
 */
double rtclock()
{
    struct timeval tp;
    if (gettimeofday(&tp, NULL) != 0) {
        /* Keep going: the caller only uses deltas, but flag the failure. */
        fprintf(stderr, "Error: gettimeofday failed\n");
    }
    return (tp.tv_sec + tp.tv_usec * 1.0e-6);
}
int main()
{
/* Benchmark driver: times REPS runs of a PLUTO/CLooG-generated, tiled and
   register-blocked triangular matrix multiply (TRMM-like) update of B,
   and prints the average time per run. The kernel body is machine
   generated — do not hand-edit the loop bounds or unroll factors. */
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
register int i,j,k,t;
register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6,
newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6,
newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
int c1, c2, c3, c4, c5, c6, c7, c8, c9;
register int lb, ub, lb1, ub1, lb2, ub2;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.26s. */
lb1=0;
ub1=floord(N-1,1024);
/* Outermost tile dimension is distributed across threads; all loop
   counters are private per thread. */
#pragma omp parallel for shared(lb1,ub1) private(c1,c2,c3,c4,c5,c6,c7,c8,c9)
for (c1=lb1; c1<=ub1; c1++) {
{
for (c2=0; c2<=floord(N-2,512); c2++ ) {
for (c3=max(0,ceild(256*c2-127,128)); c3<=floord(N-1,256); c3++ ) {
for (c4=max(8*c1,0); c4<=min(floord(N-1,128),8*c1+7); c4++ ) {
for (c5=max(0,4*c2); c5<=min(min(4*c2+3,floord(N-2,128)),floord(128*c3+127,64)); c5++ ) {
/* Main tiled region: rows are processed 11 at a time (c8t), columns of
   A/B 8 at a time (c9t), with fully unrolled register-blocked bodies. */
for (c8t=max(0,128*c5); c8t<=min(min(128*c5+127,N-2),256*c3+254)-10; c8t=c8t+11) {
newlb_c9=-2147483648;
newub_c9=min(N-1,256*c3+255);
register int cbv_1;
cbv_1=c8t+10;
#pragma ivdep
#pragma vector always
for (c8=c8t; c8<=cbv_1; c8=c8+1) {
newlb_c9=max(newlb_c9,max(c8+1,256*c3));
}
for (c8=c8t; c8<=c8t+10; c8=c8+1) {
for (c9=max(c8+1,256*c3); c9<=newlb_c9-1; c9=c9+1) {
register int cbv_2, cbv_3;
cbv_2=max(0,128*c4);
cbv_3=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
for (c7=cbv_2; c7<=cbv_3; c7++ ) {
double scv_1;
scv_1=B[c8][c7];
scv_1=alpha*A[c8][c9]*B[c9][c7]+scv_1;
B[c8][c7]=scv_1;
}
}
}
for (c9t=newlb_c9; c9t<=newub_c9-7; c9t=c9t+8) {
register int cbv_4, cbv_5;
cbv_4=max(0,128*c4);
cbv_5=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
for (c7=cbv_4; c7<=cbv_5; c7++ ) {
double scv_2, scv_3, scv_4, scv_5, scv_6, scv_7, scv_8, scv_9;
double scv_10, scv_11, scv_12, scv_13, scv_14, scv_15, scv_16, scv_17;
double scv_18, scv_19, scv_20;
scv_2=B[(c9t+1)][c7];
scv_3=B[(c9t+2)][c7];
scv_4=B[(c9t+3)][c7];
scv_5=B[(c9t+6)][c7];
scv_6=B[(c8t+8)][c7];
scv_7=B[(c8t+2)][c7];
scv_8=B[(c8t+4)][c7];
scv_9=B[(c8t+1)][c7];
scv_10=B[(c8t+10)][c7];
scv_11=B[(c9t+5)][c7];
scv_12=B[(c8t+5)][c7];
scv_13=B[c9t][c7];
scv_14=B[(c9t+7)][c7];
scv_15=B[(c8t+9)][c7];
scv_16=B[(c8t+7)][c7];
scv_17=B[(c9t+4)][c7];
scv_18=B[c8t][c7];
scv_19=B[(c8t+3)][c7];
scv_20=B[(c8t+6)][c7];
scv_18=alpha*A[c8t][c9t]*scv_13+scv_18;
scv_18=alpha*A[c8t][(c9t+1)]*scv_2+scv_18;
scv_18=alpha*A[c8t][(c9t+2)]*scv_3+scv_18;
scv_18=alpha*A[c8t][(c9t+3)]*scv_4+scv_18;
scv_18=alpha*A[c8t][(c9t+4)]*scv_17+scv_18;
scv_18=alpha*A[c8t][(c9t+5)]*scv_11+scv_18;
scv_18=alpha*A[c8t][(c9t+6)]*scv_5+scv_18;
scv_18=alpha*A[c8t][(c9t+7)]*scv_14+scv_18;
scv_9=alpha*A[(c8t+1)][c9t]*scv_13+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+1)]*scv_2+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+2)]*scv_3+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+3)]*scv_4+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+4)]*scv_17+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+5)]*scv_11+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+6)]*scv_5+scv_9;
scv_9=alpha*A[(c8t+1)][(c9t+7)]*scv_14+scv_9;
scv_7=alpha*A[(c8t+2)][c9t]*scv_13+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+1)]*scv_2+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+2)]*scv_3+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+3)]*scv_4+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+4)]*scv_17+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+5)]*scv_11+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+6)]*scv_5+scv_7;
scv_7=alpha*A[(c8t+2)][(c9t+7)]*scv_14+scv_7;
scv_19=alpha*A[(c8t+3)][c9t]*scv_13+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+1)]*scv_2+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+2)]*scv_3+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+3)]*scv_4+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+4)]*scv_17+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+5)]*scv_11+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+6)]*scv_5+scv_19;
scv_19=alpha*A[(c8t+3)][(c9t+7)]*scv_14+scv_19;
scv_8=alpha*A[(c8t+4)][c9t]*scv_13+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+1)]*scv_2+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+2)]*scv_3+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+3)]*scv_4+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+4)]*scv_17+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+5)]*scv_11+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+6)]*scv_5+scv_8;
scv_8=alpha*A[(c8t+4)][(c9t+7)]*scv_14+scv_8;
scv_12=alpha*A[(c8t+5)][c9t]*scv_13+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+1)]*scv_2+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+2)]*scv_3+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+3)]*scv_4+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+4)]*scv_17+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+5)]*scv_11+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+6)]*scv_5+scv_12;
scv_12=alpha*A[(c8t+5)][(c9t+7)]*scv_14+scv_12;
scv_20=alpha*A[(c8t+6)][c9t]*scv_13+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+1)]*scv_2+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+2)]*scv_3+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+3)]*scv_4+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+4)]*scv_17+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+5)]*scv_11+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+6)]*scv_5+scv_20;
scv_20=alpha*A[(c8t+6)][(c9t+7)]*scv_14+scv_20;
scv_16=alpha*A[(c8t+7)][c9t]*scv_13+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+1)]*scv_2+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+2)]*scv_3+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+3)]*scv_4+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+4)]*scv_17+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+5)]*scv_11+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+6)]*scv_5+scv_16;
scv_16=alpha*A[(c8t+7)][(c9t+7)]*scv_14+scv_16;
scv_6=alpha*A[(c8t+8)][c9t]*scv_13+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+1)]*scv_2+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+2)]*scv_3+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+3)]*scv_4+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+4)]*scv_17+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+5)]*scv_11+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+6)]*scv_5+scv_6;
scv_6=alpha*A[(c8t+8)][(c9t+7)]*scv_14+scv_6;
scv_15=alpha*A[(c8t+9)][c9t]*scv_13+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+1)]*scv_2+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+2)]*scv_3+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+3)]*scv_4+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+4)]*scv_17+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+5)]*scv_11+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+6)]*scv_5+scv_15;
scv_15=alpha*A[(c8t+9)][(c9t+7)]*scv_14+scv_15;
scv_10=alpha*A[(c8t+10)][c9t]*scv_13+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+1)]*scv_2+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+2)]*scv_3+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+3)]*scv_4+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+4)]*scv_17+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+5)]*scv_11+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+6)]*scv_5+scv_10;
scv_10=alpha*A[(c8t+10)][(c9t+7)]*scv_14+scv_10;
B[(c8t+8)][c7]=scv_6;
B[(c8t+2)][c7]=scv_7;
B[(c8t+4)][c7]=scv_8;
B[(c8t+1)][c7]=scv_9;
B[(c8t+10)][c7]=scv_10;
B[(c8t+5)][c7]=scv_12;
B[(c8t+9)][c7]=scv_15;
B[(c8t+7)][c7]=scv_16;
B[c8t][c7]=scv_18;
B[(c8t+3)][c7]=scv_19;
B[(c8t+6)][c7]=scv_20;
}
}
for (c9=c9t; c9<=newub_c9; c9=c9+1) {
register int cbv_6, cbv_7;
cbv_6=max(0,128*c4);
cbv_7=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
for (c7=cbv_6; c7<=cbv_7; c7++ ) {
double scv_21, scv_22, scv_23, scv_24, scv_25, scv_26, scv_27, scv_28;
double scv_29, scv_30, scv_31, scv_32;
scv_21=B[(c8t+7)][c7];
scv_22=B[c8t][c7];
scv_23=B[(c8t+10)][c7];
scv_24=B[(c8t+5)][c7];
scv_25=B[(c8t+8)][c7];
scv_26=B[(c8t+3)][c7];
scv_27=B[(c8t+6)][c7];
scv_28=B[(c8t+2)][c7];
scv_29=B[(c8t+1)][c7];
scv_30=B[c9][c7];
scv_31=B[(c8t+9)][c7];
scv_32=B[(c8t+4)][c7];
scv_22=alpha*A[c8t][c9]*scv_30+scv_22;
scv_29=alpha*A[(c8t+1)][c9]*scv_30+scv_29;
scv_28=alpha*A[(c8t+2)][c9]*scv_30+scv_28;
scv_26=alpha*A[(c8t+3)][c9]*scv_30+scv_26;
scv_32=alpha*A[(c8t+4)][c9]*scv_30+scv_32;
scv_24=alpha*A[(c8t+5)][c9]*scv_30+scv_24;
scv_27=alpha*A[(c8t+6)][c9]*scv_30+scv_27;
scv_21=alpha*A[(c8t+7)][c9]*scv_30+scv_21;
scv_25=alpha*A[(c8t+8)][c9]*scv_30+scv_25;
scv_31=alpha*A[(c8t+9)][c9]*scv_30+scv_31;
scv_23=alpha*A[(c8t+10)][c9]*scv_30+scv_23;
B[(c8t+7)][c7]=scv_21;
B[c8t][c7]=scv_22;
B[(c8t+10)][c7]=scv_23;
B[(c8t+5)][c7]=scv_24;
B[(c8t+8)][c7]=scv_25;
B[(c8t+3)][c7]=scv_26;
B[(c8t+6)][c7]=scv_27;
B[(c8t+2)][c7]=scv_28;
B[(c8t+1)][c7]=scv_29;
B[(c8t+9)][c7]=scv_31;
B[(c8t+4)][c7]=scv_32;
}
}
for (c8=c8t; c8<=c8t+10; c8=c8+1) {
for (c9=newub_c9+1; c9<=min(N-1,256*c3+255); c9=c9+1) {
register int cbv_8, cbv_9;
cbv_8=max(0,128*c4);
cbv_9=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
for (c7=cbv_8; c7<=cbv_9; c7++ ) {
double scv_33;
scv_33=B[c8][c7];
scv_33=alpha*A[c8][c9]*B[c9][c7]+scv_33;
B[c8][c7]=scv_33;
}
}
}
}
/* Epilogue: handles the remaining (< 11) rows that didn't fill a full
   unrolled row-block above. */
for (c8=c8t; c8<=min(min(128*c5+127,N-2),256*c3+254); c8=c8+1) {
for (c9t=max(c8+1,256*c3); c9t<=min(N-1,256*c3+255)-7; c9t=c9t+8) {
register int cbv_10, cbv_11;
cbv_10=max(0,128*c4);
cbv_11=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
for (c7=cbv_10; c7<=cbv_11; c7++ ) {
double scv_34;
scv_34=B[c8][c7];
scv_34=alpha*A[c8][c9t]*B[c9t][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+1)]*B[(c9t+1)][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+2)]*B[(c9t+2)][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+3)]*B[(c9t+3)][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+4)]*B[(c9t+4)][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+5)]*B[(c9t+5)][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+6)]*B[(c9t+6)][c7]+scv_34;
scv_34=alpha*A[c8][(c9t+7)]*B[(c9t+7)][c7]+scv_34;
B[c8][c7]=scv_34;
}
}
for (c9=c9t; c9<=min(N-1,256*c3+255); c9=c9+1) {
register int cbv_12, cbv_13;
cbv_12=max(0,128*c4);
cbv_13=min(N-1,128*c4+127);
#pragma ivdep
#pragma vector always
for (c7=cbv_12; c7<=cbv_13; c7++ ) {
double scv_35;
scv_35=B[c8][c7];
scv_35=alpha*A[c8][c9]*B[c9][c7]+scv_35;
B[c8][c7]=scv_35;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
printf("%f\n", annot_t_total);
//print_array();
return ((int) B[0][0]);
}
|
rk4.c | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
#define SIZE 9600
// Wall-clock timing state for the benchmark region.
struct timeval startTime;
struct timeval finishTime;
double timeIntervalLength;
// RK4 stage vectors (SIZE doubles each); __sw_global__ is a project
// storage qualifier — semantics not visible here, TODO confirm.
__sw_global__ double *yt;
__sw_global__ double *k1;
__sw_global__ double *k2;
__sw_global__ double *k3;
__sw_global__ double *k4;
__sw_global__ double *yout;
__sw_global__ double totalSum;
// Problem data: state vector y, forcing term power, dense SIZE x SIZE matrix c.
__sw_global__ double* y;
__sw_global__ double* power;
__sw_global__ double** c;
__sw_global__ double h;   // step size
__sw_global__ double sum; // checksum accumulated over yout
__sw_global__ int i,j;    // shared loop indices (made private in the parallel loops)
/*
 * Allocate `size` bytes or terminate the program.
 * `info` is a caller-supplied tag printed in the failure message.
 * Fix: the failure path previously called exit(0), reporting success
 * to the shell; it now exits with EXIT_FAILURE.
 */
void* myMalloc(int size, int info)
{
    void* t = (void*)malloc(size);
    if(!t)
    {
        printf("\nMemory allocation error [%d]",info);
        fflush(stdout);
        exit(EXIT_FAILURE);
    }
    return t;
}
/*
 * Benchmark driver: one RK4 step for y' = power - C*y over SIZE unknowns.
 * Allocates and initializes the data, times the four parallel stage
 * sweeps, then prints the elapsed time and the checksum of yout.
 *
 * Fixes applied:
 *  - yt/k1..k4/yout hold doubles, so they are sized with sizeof(double)
 *    instead of sizeof(double*) (which under-allocates on ILP32 targets).
 *  - "#pragma omp parallel for" must be followed directly by a for loop
 *    (OpenMP canonical loop form); the enclosing compound statements were
 *    removed. Serial behavior is unchanged.
 */
int main(int argc, char* argv[])
{
    h=0.3154;
    sum=0;
    //
    //MEMORY ALLOCATION
    //
    y     = (double* )myMalloc(SIZE*sizeof(double) ,1);
    power = (double* )myMalloc(SIZE*sizeof(double) ,2);
    c     = (double**)myMalloc(SIZE*sizeof(double*),3);
    for (i=0;i<SIZE;i++)
    {
        c[i]=(double*)myMalloc(SIZE*sizeof(double),4);
    }
    /* was sizeof(double*): these are arrays of double */
    yt   = (double*)myMalloc(SIZE*sizeof(double),4);
    k1   = (double*)myMalloc(SIZE*sizeof(double),5);
    k2   = (double*)myMalloc(SIZE*sizeof(double),6);
    k3   = (double*)myMalloc(SIZE*sizeof(double),7);
    k4   = (double*)myMalloc(SIZE*sizeof(double),8);
    yout = (double*)myMalloc(SIZE*sizeof(double),9);
    //
    //INITIALIZATION
    //
    for (i = 0; i < SIZE; i++)
    {
        y[i]=i*i;
        power[i]=i+i;
        for (j = 0; j < SIZE; j++)
        {
            c[i][j]=i*i+j;
        }
    }
    // Start timers
    gettimeofday(&startTime, NULL);
    /* Stage 1: k1 = h*(power - C*y) */
    #pragma omp parallel for schedule(static, 32) default(shared) private(i,j)
    for (i = 0; i < SIZE; i++)
    {
        yt[i] = 0.0;
        for (j = 0; j < SIZE; j++)
            yt[i] += c[i][j]*y[j];
        k1[i] = h*(power[i]-yt[i]);
    }
    /* Stage 2: k2 = h*(power - C*(y + k1/2)) */
    #pragma omp parallel for schedule(static, 32) default(shared) private(i,j)
    for (i = 0; i < SIZE; i++)
    {
        yt[i] = 0.0;
        for (j = 0; j < SIZE; j++)
            yt[i] += c[i][j]*(y[j]+0.5*k1[j]);
        k2[i] = h*(power[i]-yt[i]);
    }
    /* Stage 3: k3 = h*(power - C*(y + k2/2)) */
    #pragma omp parallel for schedule(static, 32) default(shared) private(i,j)
    for (i = 0; i < SIZE; i++)
    {
        yt[i] = 0.0;
        for (j = 0; j < SIZE; j++)
            yt[i] += c[i][j]*(y[j]+0.5*k2[j]);
        k3[i] = h*(power[i]-yt[i]);
    }
    /* Stage 4: k4, RK4 combination, and checksum (sum is a reduction). */
    #pragma omp parallel for schedule(static, 32) default(shared) private(i,j) reduction(+:sum)
    for (i =0; i < SIZE; i++)
    {
        yt[i]=0.0;
        for (j = 0; j < SIZE; j++)
            yt[i] += c[i][j]*(y[j]+k3[j]);
        k4[i] = h*(power[i]-yt[i]);
        yout[i] = y[i] + (k1[i] + 2*k2[i] + 2*k3[i] + k4[i])/6.0;
        sum+=yout[i];
    }
    // End timers
    gettimeofday(&finishTime, NULL);
    //Calculate the interval length
    timeIntervalLength = (double)(finishTime.tv_sec-startTime.tv_sec) * 1000000
        + (double)(finishTime.tv_usec-startTime.tv_usec);
    timeIntervalLength=timeIntervalLength/1000;
    //Print the interval length
    printf("__aid_Time: %g msec.\n", timeIntervalLength);
    printf("\n\nTotalSum=%g\n\n",sum);
    fflush(stdout);
    return 0;
}
|
imutil.c | /* -----------------------------------------------------------------------------
* imutil.c
* -----------------------------------------------------------------------------
* Copyright (c) 2015-2017 Blaine Rister et al., see LICENSE for details.
* -----------------------------------------------------------------------------
* Miscellaneous utility routines for image processing, linear algebra, and
* statistical regression. This library completely defines the Image,
* Mat_rm, and Ransac types, among others, and stands apart from the other
* source.
* -----------------------------------------------------------------------------
*/
#include <assert.h>
#include <getopt.h>
#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <stddef.h>
#include <float.h>
#include <zlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "immacros.h"
#include "imtypes.h"
#include "dicom.h"
#include "nifti.h"
#include "imutil.h"
/* Check for a version number */
#if !defined(SIFT3D_VERSION_NUMBER)
#error imutil.c: Must define the preprocessor macro SIFT3D_VERSION_NUMBER
#endif
/* Stringify a macro name */
#define STR(x) #x
/* Stringify the result of a macro expansion */
#define XSTR(x) STR(x)
/* zlib definitions */
#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__)
#include <fcntl.h>
#include <io.h>
#define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY)
#else
#define SET_BINARY_MODE(file)
#endif
/* Implementation parameters */
//#define SIFT3D_USE_OPENCL // Use OpenCL acceleration
#define SIFT3D_RANSAC_REFINE // Use least-squares refinement in RANSAC
/* Implement strnlen, if it's missing */
#ifndef SIFT3D_HAVE_STRNLEN
/* Fallback strnlen: length of `string`, but never scanning past
 * `maxlen` bytes. Returns maxlen when no terminator is found. */
size_t strnlen(const char *string, size_t maxlen) {
    size_t len = 0;
    while (len < maxlen && string[len] != '\0') {
        len++;
    }
    return len;
}
#endif
/* Implement strndup, if it's missing */
#ifndef SIFT3D_HAVE_STRNDUP
/* Fallback strndup: duplicate at most `n` bytes of `s` into a freshly
 * allocated, NUL-terminated buffer. Returns NULL on allocation failure;
 * the caller owns (and must free) the result. */
char *strndup(const char *s, size_t n) {
    /* Bounded length, computed inline (equivalent to strnlen(s, n)). */
    const char *terminator = memchr(s, '\0', n);
    const size_t len = terminator ? (size_t)(terminator - s) : n;

    char *copy = malloc(len + 1);
    if (copy == NULL) {
        return NULL;
    }
    copy[len] = '\0';
    return memcpy(copy, s, len);
}
#endif
/* SIFT3D version message */
const char version_msg[] =
"SIFT3D version " XSTR(SIFT3D_VERSION_NUMBER) " \n"
"\n"
"Source code available at https://github.com/bbrister/SIFT3D\n"
"\n"
"Please contact Blaine Rister (blaine@stanford.edu) with questions or "
"concerns. \n";
/* Bug message */
const char bug_msg[] =
"SIFT3D has encountered an unexpected error. We would appreciate it \n"
"if you would report this issue at the following page: \n"
" https://github.com/bbrister/SIFT3D/issues \n";
/* Supported file extensions */
extern const char ext_dcm[]; // dicom.h
const char ext_analyze[] = "img"; /* fix: stray second ';' (empty file-scope declaration is invalid in strict ISO C) */
const char ext_gz[] = "gz";
const char ext_nii[] = "nii";
const char ext_dir[] = "";
/* Output file permissions (rwxr-xr-x) */
const mode_t out_mode = 0755;
/* Default RANSAC parameters */
const double SIFT3D_err_thresh_default = 5.0;
const int SIFT3D_num_iter_default = 500;
/* Declarations for the virtual function implementations */
static int copy_Affine(const void *const src, void *const dst);
static int copy_Tps(const void *const src, void *const dst);
static void apply_Affine_xyz(const void *const affine, const double x_in,
const double y_in, const double z_in,
double *const x_out, double *const y_out,
double *const z_out);
static void apply_Tps_xyz(const void *const tps, const double x_in,
const double y_in, const double z_in,
double *const x_out, double *const y_out,
double *const z_out);
static int apply_Affine_Mat_rm(const void *const affine,
const Mat_rm * const mat_in, Mat_rm * const mat_out);
static int apply_Tps_Mat_rm(const void *const tps, const Mat_rm * const mat_in,
Mat_rm * const mat_out);
static size_t Affine_get_size(void);
static size_t Tps_get_size(void);
static int write_Affine(const char *path, const void *const tform);
static int write_Tps(const char *path, const void *const tform);
static void cleanup_Affine(void *const affine);
static void cleanup_Tps(void *const tps);
static int mkpath(const char *path, mode_t mode);
/* Virtual function tables */
const Tform_vtable Affine_vtable = {
copy_Affine,
apply_Affine_xyz,
apply_Affine_Mat_rm,
Affine_get_size,
write_Affine,
cleanup_Affine
};
const Tform_vtable Tps_vtable = {
copy_Tps,
apply_Tps_xyz,
apply_Tps_Mat_rm,
Tps_get_size,
write_Tps,
cleanup_Tps
};
/* Internal macros */
#define TFORM_GET_VTABLE(arg) (((Affine *) arg)->tform.vtable)
#define AFFINE_GET_DIM(affine) ((affine)->A.num_rows)
/* Global data */
CL_data cl_data;
/* LAPACK declarations */
#ifdef SIFT3D_MEX
// Set the integer width to Matlab's defined width
#include <uchar.h>
#include "mex.h"
typedef mwSignedIndex fortran_int;
#ifdef _WINDOWS
// Remove underscores from FORTRAN functions
#define dlange_ dlange
#define dgecon_ dgecon
#define dgelss_ dgelss
#define dgetrf_ dgetrf
#define dgetrs_ dgetrs
#define dsyevd_ dsyevd
#endif
#else
typedef int32_t fortran_int;
#endif
extern double dlange_(const char *, const fortran_int *, const fortran_int *,
const double *, const fortran_int *, double *);
extern void dgecon_(const char *, const fortran_int *, double *,
const fortran_int *, const double *, double *,
double *, fortran_int *, fortran_int *);
extern void dgelss_(const fortran_int *, const fortran_int *,
const fortran_int *, const double *, const fortran_int *,
double *, const fortran_int *, double *, const double *,
fortran_int *, double *, const fortran_int *,
fortran_int *);
extern void dgetrf_(const fortran_int *, const fortran_int *, double *,
const fortran_int *, fortran_int *, fortran_int *);
extern void dgetrs_(const char *, const fortran_int *, const fortran_int *,
const double *, const fortran_int *, fortran_int *,
double *, const fortran_int *, fortran_int *);
extern void dsyevd_(const char *, const char *, const fortran_int *, double *,
const fortran_int *, double *, double *,
const fortran_int *, fortran_int *, const fortran_int *,
fortran_int *);
/* Internal helper routines */
static char *read_file(const char *path);
static int do_mkdir(const char *path, mode_t mode);
static int cross_mkdir(const char *path, mode_t mode);
static double resample_linear(const Image * const in, const double x,
const double y, const double z, const int c);
static double resample_lanczos2(const Image * const in, const double x,
const double y, const double z, const int c);
static double lanczos(double x, double a);
static int check_cl_image_support(cl_context context, cl_mem_flags mem_flags,
cl_image_format image_format,
cl_mem_object_type image_type);
static int compile_cl_program_from_source(cl_program * program,
cl_context context,
cl_device_id * devices,
int num_devices, char **src,
int num_str);
static int n_choose_k(const int n, const int k, int **ret);
static int make_spline_matrix(Mat_rm * src, Mat_rm * src_in, Mat_rm * sp_src,
int K_terms, int *r, int dim);
static int make_affine_matrix(const Mat_rm *const pts_in, const int dim,
Mat_rm *const mat_out);
static Mat_rm *extract_ctrl_pts(void *tform, tform_type type);
static Mat_rm *extract_ctrl_pts_Tps(Tps * tps);
static int solve_system(const Mat_rm *const src, const Mat_rm *const ref,
void *const tform);
static double tform_err_sq(const void *const tform, const Mat_rm *const src,
const Mat_rm *const ref, const int i);
static int ransac(const Mat_rm *const src, const Mat_rm *const ref,
const Ransac *const ran, void *tform, int **const cset, int *const len);
static int convolve_sep(const Image * const src,
Image * const dst, const Sep_FIR_filter * const f,
const int dim, const double unit);
static int convolve_sep_gen(const Image * const src,
Image * const dst, const Sep_FIR_filter * const f,
const int dim, const double unit);
static int convolve_sep_cl(const Image * const src,
Image * const dst, const Sep_FIR_filter * const f,
int dim, const double unit);
static int convolve_sep_sym(const Image * const src, Image * const dst,
const Sep_FIR_filter * const f, const int dim,
const double unit);
static const char *get_file_name(const char *path);
static const char *get_file_ext(const char *name);
/* Unfinished public routines */
int init_Tps(Tps * tps, int dim, int terms);
int resize_Tps(Tps * tps, int num_pts, int dim);
/* As realloc, but frees the underlying pointer and returns NULL on error, or
 * if size is 0 and ptr is non-NULL. Unlike plain realloc, the original
 * buffer never leaks on failure. */
void *SIFT3D_safe_realloc(void *ptr, size_t size) {

        void *new_mem = NULL;

        /* A zero-byte request is always treated as failure; otherwise
         * attempt the reallocation. */
        if (size != 0)
                new_mem = realloc(ptr, size);

        /* On any failure, release the original buffer so it cannot leak. */
        if (new_mem == NULL && ptr != NULL)
                free(ptr);

        return new_mem;
}
/* Finish all OpenCL command queues. Blocks until every command submitted to
 * each device queue has completed. Compiles to a no-op when OpenCL support
 * is disabled. */
void clFinish_all()
{
#ifdef SIFT3D_USE_OPENCL
	int i;
	// One queue per device was created in init_cl
	for (i = 0; i < cl_data.num_devices; i++) {
		clFinish(cl_data.queues[i]);
	}
#endif
}
/* Check the error code and exit on error, unless NDEBUG is
 * defined. If exiting, prints the error type and the cause,
 * given by the msg string. */
void check_cl_error(int err, const char *msg)
{
#ifdef NDEBUG
        /* Release builds ignore OpenCL errors entirely */
        return;
#endif
        /* Success requires no action */
        if (err == CL_SUCCESS)
                return;

        /* Report the raw code and the failing operation, then abort */
        printf("unknown OpenCL error %d \n", err);
        printf("Exiting due to error in: %s \n", msg);
        exit(1);
}
/* Returns SIFT3D_SUCCESS if the specified format is supported for this context,
 * or SIFT3D_FAILURE if it is not. */
SIFT3D_IGNORE_UNUSED
static int check_cl_image_support(cl_context context, cl_mem_flags mem_flags,
				  cl_image_format image_format,
				  cl_mem_object_type image_type)
{
#ifdef SIFT3D_USE_OPENCL
	cl_image_format *image_formats;
	cl_int err;
	cl_uint num_image_formats;
	int i, support;

	/* Query the number of supported formats, then fetch them all */
	err = clGetSupportedImageFormats(context, mem_flags, image_type,
					 0, NULL, &num_image_formats);
	if ((image_formats =
	     malloc(num_image_formats * sizeof(cl_image_format))) == NULL)
		return SIFT3D_FAILURE;
	err |= clGetSupportedImageFormats(context, mem_flags, image_type,
					  num_image_formats, image_formats,
					  NULL);
	check_cl_error(err, "2D image formats");

	/* Search for an exact match to the requested format.
	 * BUG FIX: memcmp returns 0 on a match, so the test must be negated.
	 * The old code declared support on the first format that did NOT
	 * match, and reported failure when only matching formats existed. */
	support = SIFT3D_FALSE;
	for (i = 0; i < num_image_formats; i++) {
		if (!memcmp(image_formats + i, &image_format,
			    sizeof(cl_image_format))) {
			support = SIFT3D_TRUE;
			break;
		}
	}

	/* BUG FIX: release the query buffer (was leaked on every call) */
	free(image_formats);

	return (support == SIFT3D_TRUE) ? SIFT3D_SUCCESS : SIFT3D_FAILURE;
#else
	printf
	    ("check_cl_image_support: This version was not compiled with OpenCL!\n");
	return SIFT3D_FAILURE;
#endif
}
/* Initialize the relevant OpenCL data, using the specified device type,
 * memory flags, and image format. If no such devices are found, or these
 * settings are not supported on the device, returns SIFT3D_FAILURE. Returns SIFT3D_SUCCESS
 * when userData is initialized.
 *
 * This library saves a copy of user_cl_data for use with future calls. To change
 * the settings of the library, call init_cl again. */
int init_cl(CL_data * user_cl_data, const char *platform_name,
	    cl_device_type device_type, cl_mem_flags mem_flags,
	    cl_image_format image_format)
{
#ifdef SIFT3D_USE_OPENCL
	Kernels kernels;
	cl_platform_id *platforms;
	cl_device_id *devices;
	char *name, *src;
	cl_context_properties properties[3];
	cl_command_queue *queues;
	cl_program program;
	cl_platform_id platform;
	cl_context context;
	cl_uint num_platforms, num_devices;
	cl_int err;
	cl_bool support;
	size_t size;
	int i;

	// Initialize persistent arrays to NULL so the cleanup path is safe
	devices = NULL;
	queues = NULL;
	src = NULL;

	// Query the available platforms and select the specified one
	err = clGetPlatformIDs(0, NULL, &num_platforms);
	if (err != CL_SUCCESS || num_platforms < 1)
		goto init_cl_quit;
	if ((platforms = (cl_platform_id *) malloc(num_platforms *
						   sizeof(cl_platform_id))) ==
	    NULL)
		goto init_cl_quit;
	clGetPlatformIDs(num_platforms, platforms, NULL);
	name = NULL;
	platform = (cl_platform_id) NULL;
	for (i = 0; i < num_platforms; i++) {

		char *name_tmp;

		// Query the length of this platform's name, then fetch it
		err = clGetPlatformInfo(platforms[i], CL_PLATFORM_NAME, 0,
					NULL, &size);

		/* BUG FIX: assigning realloc's result directly to name would
		 * leak the previous buffer if realloc failed. */
		if ((name_tmp = realloc(name, size * sizeof(char))) == NULL) {
			free(platforms);
			free(name);
			goto init_cl_quit;
		}
		name = name_tmp;

		err = clGetPlatformInfo(platforms[i], CL_PLATFORM_NAME,
					size, name, NULL);
		if (!strcmp(name, platform_name)) {
			platform = platforms[i];
		}
	}
	free(platforms);
	free(name);
	if (platform == (cl_platform_id) NULL) {
		printf("init_cl: Failed to find platform %s \n", platform_name);
		goto init_cl_quit;
	}
	// Get the number of devices of the specified type
	devices = NULL;
	err = clGetDeviceIDs(platform, device_type, 0, NULL, &num_devices);
	if (err != CL_SUCCESS && err != CL_DEVICE_NOT_FOUND) {
		check_cl_error(err, "Create context");
	} else if (num_devices > 0) {
		devices =
		    (cl_device_id *) malloc(num_devices * sizeof(cl_device_id));
		err =
		    clGetDeviceIDs(platform, device_type, num_devices, devices,
				   NULL);
		check_cl_error(err, "Get devices");
	}
	if (num_devices <= 0 || devices == NULL) {
		puts("init_cl: No OpenCL devices available \n");
		goto init_cl_quit;
	}
	//TODO: Multiple GPUs on one context does not seem to work. Maybe try multiple
	// contexts, one per GPU?
	num_devices = 1;

	// Create the context
	properties[0] = CL_CONTEXT_PLATFORM;
	properties[1] = (cl_context_properties) platform;
	properties[2] = 0;
	context =
	    clCreateContext(properties, num_devices, devices, NULL, NULL, &err);
	check_cl_error(err, "Create context \n");

	/* Create a command queue on each device.
	 * NOTE(review): queues[i] is only assigned when the device type
	 * matches; context and any created queues are not released on the
	 * error path -- confirm whether that leak matters for callers. */
	if ((queues = malloc(num_devices * sizeof(cl_command_queue))) == NULL)
		goto init_cl_quit;
	for (i = 0; i < num_devices; i++) {
		cl_device_type type;
		err = clGetDeviceInfo(devices[i], CL_DEVICE_TYPE,
				      sizeof(cl_device_type), &type, NULL);
		if (type == device_type) {
			if ((queues[i] = clCreateCommandQueue(context,
							      devices[i], 0,
							      NULL)) == NULL) {
				goto init_cl_quit;
			}
		}
	}
	// Query for support of the desired image and memory format on these devices
	for (i = 0; i < num_devices; i++) {
		// Query for image support
		support = CL_FALSE;
		clGetDeviceInfo(devices[i], CL_DEVICE_IMAGE_SUPPORT,
				sizeof(cl_bool), &support, NULL);
		if (support != CL_TRUE) {
			printf
			    ("init_cl: Images are not supported by device %d \n",
			     i);
			goto init_cl_quit;
		}
		// Query for support of the specified image format in both 2D and 3D images
		check_cl_image_support(context, mem_flags, image_format,
				       CL_MEM_OBJECT_IMAGE2D);
		check_cl_image_support(context, mem_flags, image_format,
				       CL_MEM_OBJECT_IMAGE3D);
	}
	// Load the kernels from a file
	if ((src = read_file(KERNELS_PATH)) == NULL) {
		printf("init_cl: Error reading kernel source file %s",
		       KERNELS_PATH);
		goto init_cl_quit;
	}
	// Compile static programs
	if (compile_cl_program_from_source
	    (&program, context, devices, num_devices, &src, 1))
		goto init_cl_quit;
	kernels.downsample_2x_3d =
	    clCreateKernel(program, "downsample_2x_3d", &err);
	check_cl_error(err, "init_cl: create kernels");
	clReleaseProgram(program);

	// Save data to the user and library copies
	cl_data.platform = platform;
	cl_data.devices = devices;
	cl_data.context = context;
	cl_data.queues = queues;
	cl_data.num_devices = num_devices;
	cl_data.image_format = image_format;
	cl_data.valid = SIFT3D_TRUE;
	cl_data.kernels = kernels;
	*user_cl_data = cl_data;

	return SIFT3D_SUCCESS;

init_cl_quit:
	// Release the persistent allocations on failure
	if (devices != NULL)
		free(devices);
	if (queues != NULL)
		free(queues);
	if (src != NULL)
		free(src);
	return SIFT3D_FAILURE;
#else
	printf("init_cl: This version was not compiled with OpenCL!\n");
	return SIFT3D_FAILURE;
#endif
}
/* Compile a program from the given source strings, writing the program handle into
 * the specified pointer. Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE
 * otherwise; on failure no program handle is leaked. */
SIFT3D_IGNORE_UNUSED
static int compile_cl_program_from_source(cl_program * program,
					  cl_context context,
					  cl_device_id * devices,
					  int num_devices, char **src,
					  int num_str)
{
#ifdef SIFT3D_USE_OPENCL
	cl_int err;
	int i;

	err = CL_SUCCESS;
	*program =
	    clCreateProgramWithSource(context, 1, (const char **)src, NULL,
				      &err);
	if (*program == NULL || err != CL_SUCCESS) {
		puts("Error creating program for static kernels \n");
		/* BUG FIX: release a handle that was created despite the
		 * error code (was leaked) */
		if (*program != NULL)
			clReleaseProgram(*program);
		return SIFT3D_FAILURE;
	}
	if ((err =
	     clBuildProgram(*program, 0, NULL, NULL, NULL,
			    NULL)) != CL_SUCCESS) {
		char log[1 << 15];
		puts("Error: Failed to build program \n");
		// Dump the per-device build logs to aid debugging
		for (i = 0; i < num_devices; i++) {
			clGetProgramBuildInfo(*program, devices[i],
					      CL_PROGRAM_BUILD_LOG, sizeof(log),
					      log, NULL);
			printf("\n-------Build log for device %d-------\n %s",
			       i, log);
		}
		/* BUG FIX: release the failed program (was leaked) */
		clReleaseProgram(*program);
		return SIFT3D_FAILURE;
	}
	return SIFT3D_SUCCESS;
#else
	printf
	    ("compile_cl_program_from_source: This verison was not compiled with "
	     "OpenCL!\n");
	return SIFT3D_FAILURE;
#endif
}
/* Initialize a triangle mesh for first use. This must be called before mesh
 * can be used in any other functions. */
void init_Mesh(Mesh * const mesh)
{
        /* Mark the mesh as empty: invalid triangle count, no storage owned */
        mesh->num = -1;
        mesh->tri = NULL;
}
/* Release all memory associated with a triangle mesh. mesh cannot be reused
 * before it is reinitialized. */
void cleanup_Mesh(Mesh * const mesh)
{
	free(mesh->tri);
	/* Defend against double-free if cleanup is called twice
	 * (free(NULL) is a no-op) */
	mesh->tri = NULL;
}
/* Convert a matrix to a different type. in and out may be the same pointer.
 *
 * This function resizes out.
 *
 * All matrices must be initialized prior to calling this function.
 *
 * NOTE(review): for in == out with source and destination types of
 * different sizes, the resize reallocates before the element copy reads the
 * old-typed data -- confirm in-place cross-size conversion is actually
 * supported.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int convert_Mat_rm(const Mat_rm * const in, Mat_rm * const out,
		   const Mat_rm_type type)
{
	int i, j;

	// Resize the output to in's geometry, with the destination type
	out->num_rows = in->num_rows;
	out->num_cols = in->num_cols;
	out->type = type;
	if (resize_Mat_rm(out))
		return SIFT3D_FAILURE;

	/* Element-wise copy with a C cast from type_in to type_out */
#define CONVERT_TYPE(type_in, type_out) \
	SIFT3D_MAT_RM_LOOP_START(in, i, j) \
	SIFT3D_MAT_RM_GET(out, i, j, type_out) = (type_out) \
	SIFT3D_MAT_RM_GET(in, i, j, type_in); \
	SIFT3D_MAT_RM_LOOP_END

	/* Dispatch on the source type for a fixed destination type */
#define CONVERT_TYPE_OUTER(type_out) \
	switch (in->type) { \
	case SIFT3D_DOUBLE: \
		CONVERT_TYPE(double, type_out) \
		break; \
	case SIFT3D_FLOAT: \
		CONVERT_TYPE(float, type_out) \
		break; \
	case SIFT3D_INT: \
		CONVERT_TYPE(int, type_out) \
		break; \
	default: \
		puts("convert_Mat_rm: unknown type of input matrix \n"); \
		return SIFT3D_FAILURE; \
	}

	// Convert to the specified type
	switch (type) {
	case SIFT3D_DOUBLE:
		CONVERT_TYPE_OUTER(double) break;
	case SIFT3D_FLOAT:
		CONVERT_TYPE_OUTER(float) break;
	case SIFT3D_INT:
		CONVERT_TYPE_OUTER(int) break;
	default:
		puts("convert_Mat_rm: unknown destination type \n");
		return SIFT3D_FAILURE;
	}
#undef CONVERT_TYPE_OUTER
#undef CONVERT_TYPE

	return SIFT3D_SUCCESS;
}
/* Shortcut function to initalize a matrix.
 *
 * Parameters:
 * mat - The matrix to be initialized
 * num_rows - The number of rows
 * num_cols - The number of columns
 * type - The data type
 * set_zero - If true, initializes the elements to zero.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int init_Mat_rm(Mat_rm *const mat, const int num_rows, const int num_cols,
		const Mat_rm_type type, const int set_zero) {

        /* Record the requested geometry; no storage is owned yet */
        mat->num_rows = num_rows;
        mat->num_cols = num_cols;
        mat->type = type;
        mat->static_mem = SIFT3D_FALSE;
        mat->size = 0;
        mat->u.data_double = NULL;

        /* Allocate storage, then optionally clear it. Short-circuit
         * evaluation guarantees the resize happens before the zeroing. */
        return (resize_Mat_rm(mat) || (set_zero && zero_Mat_rm(mat))) ?
                SIFT3D_FAILURE : SIFT3D_SUCCESS;
}
/* As init_Mat_rm, but aliases data memory with pointer p. The flag
 * mat->static_mem is set, and the matrix does not need to be freed with
 * cleanup_Mat_rm. But, an error will be thrown if the user attempts to resize
 * the memory. That is, resize_Mat_rm will only return success if the size of
 * the matrix does not change. */
int init_Mat_rm_p(Mat_rm *const mat, const void *const p, const int num_rows,
		  const int num_cols, const Mat_rm_type type,
		  const int set_zero) {

	/* Perform normal initialization, deferring any zero-fill until the
	 * matrix aliases p. (Previously the temporary buffer allocated here
	 * was zeroed and then immediately freed -- wasted work.) */
	if (init_Mat_rm(mat, num_rows, num_cols, type, SIFT3D_FALSE))
		return SIFT3D_FAILURE;

	// Clean up any existing memory
	cleanup_Mat_rm(mat);

	/* Alias with provided memory and set the static flag. Note that
	 * this casts away const: callers must not pass truly read-only
	 * storage if the matrix will later be written. */
	mat->u.data_double = (double *) p;
	mat->static_mem = SIFT3D_TRUE;

	// Optionally set the aliased memory to zero
	if (set_zero && zero_Mat_rm(mat))
		return SIFT3D_FAILURE;

	return SIFT3D_SUCCESS;
}
/* Prints the type of mat into the string str. str must be large enough to
 * hold the longest type name (or the unknown-type message). */
void sprint_type_Mat_rm(const Mat_rm * const mat, char *const str)
{
        const char *type_name;

        /* Map the type tag to its printable name */
        switch (mat->type) {
        case SIFT3D_DOUBLE:
                type_name = "double";
                break;
        case SIFT3D_FLOAT:
                type_name = "float";
                break;
        case SIFT3D_INT:
                type_name = "int";
                break;
        default:
                type_name = "<sprint_type_Mat_rm: unknown type>";
        }

        sprintf(str, "%s", type_name);
}
/* Concatenate two matrices. If dim = 0, concatenates vertically, i.e.
 * dst = [src1
 * src2].
 * If dim = 1, concatenates horizontally, i.e
 * dst = [src1 src2].
 *
 * dim must be 0 or 1; other values index out of bounds below (not checked).
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE on failure. */
int concat_Mat_rm(const Mat_rm * const src1, const Mat_rm * const src2,
		  Mat_rm * const dst, const int dim)
{
	int off[2], dst_dims[2];
	int i, j;

	// Dimensions as [rows, cols] so that dim can index them
	const int dims1[] = {src1->num_rows, src1->num_cols};
	const int dims2[] = {src2->num_rows, src2->num_cols};

	// Verify inputs: the non-concatenated dimension must agree
	if (dims1[1 - dim] != dims2[1 - dim]) {
		SIFT3D_ERR("concat_Mat_rm: incompatible dimensions: "
			"left: [%d x %d] right: [%d x %d] dim: %d \n",
			dims1[0], dims1[1], dims2[0], dims2[1], dim);
		return SIFT3D_FAILURE;
	}
	// Both operands must share an element type
	if (src1->type != src2->type) {
		char type1[1024], type2[1024];
		sprint_type_Mat_rm(src1, type1);
		sprint_type_Mat_rm(src2, type2);
		SIFT3D_ERR("concat_Mat_rm: incompatible types: "
			"left: <%s> right: <%s> \n", type1, type2);
		return SIFT3D_FAILURE;
	}
	// Compute the destination dimensions: sum along dim, copy the other
	for (i = 0; i < 2; i++) {
		dst_dims[i] = dim == i ? dims1[i] + dims2[i] : dims1[i];
	}

	// Resize dst
	dst->type = src1->type;
	dst->num_rows = dst_dims[0];
	dst->num_cols = dst_dims[1];
	if (resize_Mat_rm(dst))
		return SIFT3D_FAILURE;

	// Compute the offsets at which src2's data begins in dst
	for (i = 0; i < 2; i++) {
		off[i] = dim == i ? dims1[i] : 0;
	}

	/* Copy src1 at the origin, then src2 shifted by off */
#define COPY_DATA(type) \
	/* Copy src1 data */ \
	SIFT3D_MAT_RM_LOOP_START(src1, i, j) \
	SIFT3D_MAT_RM_GET(dst, i, j, type) = \
	SIFT3D_MAT_RM_GET(src1, i, j, type); \
	SIFT3D_MAT_RM_LOOP_END \
	\
	/* Copy src2 data */ \
	SIFT3D_MAT_RM_LOOP_START(src2, i, j) \
	\
	SIFT3D_MAT_RM_GET(dst, i + off[0], j + off[1], type) = \
	SIFT3D_MAT_RM_GET(src2, i, j, type); \
	\
	SIFT3D_MAT_RM_LOOP_END

	// Copy the data, dispatching on the element type
	switch (dst->type) {
	case SIFT3D_DOUBLE:
		COPY_DATA(double);
		break;
	case SIFT3D_FLOAT:
		COPY_DATA(float);
		break;
	case SIFT3D_INT:
		COPY_DATA(int);
		break;
	default:
		SIFT3D_ERR("concat_Mat_rm: unknown type \n");
		return SIFT3D_FAILURE;
	}
#undef COPY_DATA

	return SIFT3D_SUCCESS;
}
/* Copies a matrix. dst will be resized to match src. */
int copy_Mat_rm(const Mat_rm * const src, Mat_rm * const dst)
{
        /* Mirror src's geometry and element type onto dst */
        dst->num_rows = src->num_rows;
        dst->num_cols = src->num_cols;
        dst->type = src->type;
        if (resize_Mat_rm(dst))
                return SIFT3D_FAILURE;

        /* memmove rather than memcpy: static-memory matrices may alias */
        memmove(dst->u.data_double, src->u.data_double, src->size);

        return SIFT3D_SUCCESS;
}
/* Print a matrix to stdout. The matrix must be initialized.
 * Elements are space-separated; each row is followed by a blank line. */
int print_Mat_rm(const Mat_rm * const mat)
{
	int i, j;

	/* Print every element with the given printf conversion, one matrix
	 * row per output line */
#define PRINT_MAT_RM(type, format) \
	SIFT3D_MAT_RM_LOOP_START(mat, i, j) \
	printf("%" #format " ", SIFT3D_MAT_RM_GET(mat, i, j, type)); \
	SIFT3D_MAT_RM_LOOP_COL_END \
	puts("\n"); \
	SIFT3D_MAT_RM_LOOP_ROW_END

	// Dispatch on the element type
	switch (mat->type) {
	case SIFT3D_DOUBLE:
		PRINT_MAT_RM(double, f) break;
	case SIFT3D_FLOAT:
		PRINT_MAT_RM(float, f) break;
	case SIFT3D_INT:
		PRINT_MAT_RM(int, d) break;
	default:
		puts("print_Mat_rm: unknown type \n");
		return SIFT3D_FAILURE;
	}
#undef PRINT_MAT_RM

	return SIFT3D_SUCCESS;
}
/* Re-sizes a matrix. The following fields
 * must already be initialized:
 * -num_rows
 * -num_cols
 * -type
 * -u.data_* (NULL for first use, non-null for resize)
 *
 * The following fields will be modified:
 * -size
 * -u.data_* (Change is not guaranteed)
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise.
 */
int resize_Mat_rm(Mat_rm *const mat) {

	size_t type_size, total_size;
	const int num_rows = mat->num_rows;
	const int num_cols = mat->num_cols;
	double **const data = &mat->u.data_double;
	/* BUG FIX: cast before multiplying so that large matrices do not
	 * overflow int arithmetic before the conversion to size_t */
	const size_t numel = (size_t) num_rows * (size_t) num_cols;
	const Mat_rm_type type = mat->type;

	// Get the size of the underyling datatype
	switch (type) {
	case SIFT3D_DOUBLE:
		type_size = sizeof(double);
		break;
	case SIFT3D_FLOAT:
		type_size = sizeof(float);
		break;
	case SIFT3D_INT:
		type_size = sizeof(int);
		break;
	default:
		SIFT3D_ERR("resize_Mat_rm: unknown type! \n");
		return SIFT3D_FAILURE;
	}

	// Calculate the new size
	total_size = type_size * numel;

	// Do nothing if the size has not changed
	if (total_size == mat->size)
		return SIFT3D_SUCCESS;

	/* Check for static reallocation before modifying any state.
	 * (BUG FIX: mat->size was previously updated first, leaving the
	 * matrix in an inconsistent state on this failure path.) */
	if (mat->static_mem) {
		SIFT3D_ERR("resize_Mat_rm: illegal re-allocation of static matrix \n");
		return SIFT3D_FAILURE;
	}

	mat->size = total_size;

	// Reset if the new size is 0
	if (total_size == 0) {
		cleanup_Mat_rm(mat);
		return init_Mat_rm(mat, num_rows, num_cols, type, SIFT3D_FALSE);
	}

	// Re-allocate the memory; the old buffer is freed on failure
	if ((*data = (double *) SIFT3D_safe_realloc(*data, total_size)) == NULL) {
		mat->size = 0;
		return SIFT3D_FAILURE;
	}

	return SIFT3D_SUCCESS;
}
/* Set all elements to zero.
 * Returns SIFT3D_SUCCESS, or SIFT3D_FAILURE on an unknown element type. */
int zero_Mat_rm(Mat_rm *const mat)
{
	int i, j;

	/* Write a typed literal zero to every element */
#define SET_ZERO(type) \
	SIFT3D_MAT_RM_LOOP_START(mat, i, j) \
	SIFT3D_MAT_RM_GET(mat, i, j, type) = (type) 0; \
	SIFT3D_MAT_RM_LOOP_END

	// Dispatch on the element type
	switch (mat->type) {
	case SIFT3D_DOUBLE:
		SET_ZERO(double);
		break;
	case SIFT3D_FLOAT:
		SET_ZERO(float);
		break;
	case SIFT3D_INT:
		SET_ZERO(int);
		break;
	default:
		return SIFT3D_FAILURE;
	}
#undef SET_ZERO

	return SIFT3D_SUCCESS;
}
/* Set a matrix to identity.
 *
 * Parameters:
 * n: The length of the square matrix. The output will have size [n x n].
 * mat: The matrix to be set.
 *
 * Note: mat->type must already be set; this function keeps the existing
 * element type.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int identity_Mat_rm(const int n, Mat_rm *const mat) {

	int i;

	// Resize the matrix to [n x n]
	mat->num_rows = mat->num_cols = n;
	if (resize_Mat_rm(mat))
		return SIFT3D_FAILURE;

	// Set to identity: clear everything, then write ones on the diagonal
	zero_Mat_rm(mat);
#define SET_IDENTITY(type) \
	for (i = 0; i < n; i++) { \
		SIFT3D_MAT_RM_GET(mat, i, i, type) = (type) 1; \
	}
	/* Expands SET_IDENTITY for mat's element type, jumping to the error
	 * label on an unknown type */
	SIFT3D_MAT_RM_TYPE_MACRO(mat, identity_Mat_rm_quit, SET_IDENTITY);
#undef SET_IDENTITY

	return SIFT3D_SUCCESS;

identity_Mat_rm_quit:
	return SIFT3D_FAILURE;
}
/* De-allocate the memory for a Mat_rm struct, unless it was initialized in
 * static mode. Safe to call more than once. */
void cleanup_Mat_rm(Mat_rm *mat) {

	// Nothing to do for an empty matrix
	if (mat->u.data_double == NULL)
		return;

	if (!mat->static_mem) {
		free(mat->u.data_double);
		/* Defend against double-free: the NULL check above makes a
		 * repeated cleanup call a no-op */
		mat->u.data_double = NULL;
	}
}
/* Make a grid with the specified spacing between lines and line width.
 * Uses the default stride and initializes grid.
 *
 * Parameters:
 *   grid - output image, allocated here with one channel
 *   nx, ny, nz - output image dimensions
 *   spacing - voxels between grid lines (must be >= 2)
 *   line_width - line thickness in voxels (1 <= line_width <= spacing)
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int draw_grid(Image * grid, int nx, int ny, int nz, int spacing, int line_width)
{
	int x, y, z;

	// Half the line thickness, used as a per-axis distance threshold
	const double line_half_width = (double)line_width / 2.0;

	// Verify inputs
	if (spacing < 2 || line_width < 1 || line_width > spacing)
		return SIFT3D_FAILURE;

	// Allocate a single-channel image initialized to zero
	if (init_im_with_dims(grid, nx, ny, nz, 1))
		return SIFT3D_FAILURE;

	/* For every voxel on a grid plane, set to 1 all voxels whose offset
	 * along each axis is strictly below line_half_width */
	SIFT3D_IM_LOOP_START(grid, x, y, z)
	if (x % spacing == 0 || y % spacing == 0 || z % spacing == 0) {

		int x_draw, y_draw, z_draw, x_start, x_end, y_start,
		    y_end, z_start, z_end;

		// Clamp the drawing window to the image bounds
		x_start = SIFT3D_MAX(x - line_half_width, 0);
		y_start = SIFT3D_MAX(y - line_half_width, 0);
		z_start = SIFT3D_MAX(z - line_half_width, 0);
		x_end = SIFT3D_MIN(x + line_half_width + 1, nx - 1);
		y_end = SIFT3D_MIN(y + line_half_width + 1, ny - 1);
		z_end = SIFT3D_MIN(z + line_half_width + 1, nz - 1);

		/* Note: abs() here is integer abs of integer offsets,
		 * compared against the fractional half-width */
		SIFT3D_IM_LOOP_LIMITED_START(grid, x_draw, y_draw, z_draw,
					     x_start, x_end,
					     y_start, y_end, z_start, z_end)
		if (abs(x_draw - x) < line_half_width &&
		    abs(y_draw - y) < line_half_width &&
		    abs(z_draw - z) < line_half_width)
			SIFT3D_IM_GET_VOX(grid, x_draw, y_draw,
					  z_draw, 0) = 1.0f;
		SIFT3D_IM_LOOP_END}
	SIFT3D_IM_LOOP_END return SIFT3D_SUCCESS;
}
/* Draw points in in image.
 *
 * Each row of in is an (x, y, z) coordinate; a cube of half-width radius
 * centered on each point is set to 1 in the single-channel output image.
 * Assumes in has at least 3 columns -- not checked here.
 *
 * Parameters:
 *   in - matrix of point coordinates, one point per row (any numeric type)
 *   dims - output image dimensions, length IM_NDIMS
 *   radius - half-width of each drawn cube, in voxels
 *   out - the output image, resized to dims here
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int draw_points(const Mat_rm * const in, const int *const dims, int radius,
		Image * const out)
{
	Mat_rm in_i;
	int i, x, y, z;

	// Initialize intermediates
	if (init_Mat_rm(&in_i, 0, 0, SIFT3D_INT, SIFT3D_FALSE))
		return SIFT3D_FAILURE;

	// Resize the output to a single-channel zeroed image
	memcpy(SIFT3D_IM_GET_DIMS(out), dims, IM_NDIMS * sizeof(int));
	out->nc = 1;
	im_default_stride(out);
	if (im_resize(out))
		goto draw_points_quit;
	im_zero(out);

	// Convert the input coordinates to integer voxel indices
	if (convert_Mat_rm(in, &in_i, SIFT3D_INT))
		goto draw_points_quit;

	for (i = 0; i < in->num_rows; i++) {

		// Cube bounds, clamped to the image
		const int cx = SIFT3D_MAT_RM_GET(&in_i, i, 0, int);
		const int cy = SIFT3D_MAT_RM_GET(&in_i, i, 1, int);
		const int cz = SIFT3D_MAT_RM_GET(&in_i, i, 2, int);
		const int x_start = SIFT3D_MAX(cx - radius, 0);
		const int y_start = SIFT3D_MAX(cy - radius, 0);
		const int z_start = SIFT3D_MAX(cz - radius, 0);
		const int x_end = SIFT3D_MIN(cx + radius, dims[0] - 1);
		const int y_end = SIFT3D_MIN(cy + radius, dims[1] - 1);
		const int z_end = SIFT3D_MIN(cz + radius, dims[2] - 1);

		// Draw the point
		SIFT3D_IM_LOOP_LIMITED_START(out, x, y, z, x_start, x_end,
					     y_start, y_end, z_start, z_end)
		SIFT3D_IM_GET_VOX(out, x, y, z, 0) = 1.0f;
		SIFT3D_IM_LOOP_END}

	// Clean up
	cleanup_Mat_rm(&in_i);
	return SIFT3D_SUCCESS;

draw_points_quit:
	cleanup_Mat_rm(&in_i);
	return SIFT3D_FAILURE;
}
/* Draw lines between two sets of points.
 * TODO currently only does XY plane. Add support for other planes
 *
 * Each row i of points1 and points2 defines one line segment; segments are
 * rasterized into the single-channel binary image out at the z slice of the
 * first endpoint. Lines whose endpoints fall outside the image are skipped.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int draw_lines(const Mat_rm * const points1, const Mat_rm * const points2,
	       const int *const dims, Image * const out)
{
	Mat_rm points1_d, points2_d;
	double xd;
	int i, y;

	// Step along x when sampling a non-vertical line
	const double line_step = 0.1;

	// Verify inputs: both point sets must be [N x IM_NDIMS]
	if (points1->num_rows != points2->num_rows ||
	    points1->num_cols != points2->num_cols ||
	    points1->num_cols != IM_NDIMS) {
		puts("draw_lines: invalid points dimensions \n");
		return SIFT3D_FAILURE;
	}
	// Initialize intermediates
	if (init_Mat_rm(&points1_d, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE) ||
	    init_Mat_rm(&points2_d, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE))
		return SIFT3D_FAILURE;

	// Resize the output image to a single-channel zeroed image
	memcpy(SIFT3D_IM_GET_DIMS(out), dims, IM_NDIMS * sizeof(int));
	out->nc = 1;
	im_default_stride(out);
	if (im_resize(out))
		goto draw_lines_quit;
	im_zero(out);

	// Convert the inputs to double
	if (convert_Mat_rm(points1, &points1_d, SIFT3D_DOUBLE) ||
	    convert_Mat_rm(points2, &points2_d, SIFT3D_DOUBLE))
		goto draw_lines_quit;

	for (i = 0; i < points1->num_rows; i++) {

		// Endpoint coordinates of this segment
		const double p1x = SIFT3D_MAT_RM_GET(&points1_d, i, 0, double);
		const double p2x = SIFT3D_MAT_RM_GET(&points2_d, i, 0, double);
		const double p1y = SIFT3D_MAT_RM_GET(&points1_d, i, 1, double);
		const double p2y = SIFT3D_MAT_RM_GET(&points2_d, i, 1, double);
		const double p1z = SIFT3D_MAT_RM_GET(&points1_d, i, 2, double);
		const double p2z = SIFT3D_MAT_RM_GET(&points2_d, i, 2, double);

		// Check the bounds; skip segments leaving the image
		if (!IM_CONTAINS(out, p1x, p1y, p1z) ||
		    !IM_CONTAINS(out, p2x, p2y, p2z))
			continue;

		// Get the bounds of the line (+0.5 rounds on truncation)
		const double x_start = SIFT3D_MIN(p1x, p2x) + 0.5;
		const double x_end = SIFT3D_MAX(p1x, p2x) + 0.5;

		// The drawing plane: the z slice of the first endpoint
		const int zi = (int)p1z;

		// A near-zero x extent is treated as a vertical line
		const int vert = fabs(x_start - x_end) < 1.0;

		// Draw the line
		if (vert) {
			// Walk y at the fixed x column
			const int xi = (int)x_start;
			const int y_start = (int)SIFT3D_MIN(p1y, p2y);
			const int y_end = (int)SIFT3D_MAX(p1y, p2y);
			for (y = y_start; y <= y_end; y++) {
				SIFT3D_IM_GET_VOX(out, xi, y, zi, 0) = 1.0f;
			}
		} else {
			/* Sample y = slope * x + b at line_step intervals;
			 * the slope is computed with the leftmost point
			 * first so its sign is consistent */
			const double y_slope = p1x < p2x ? (p2y - p1y) /
			    (p2x - p1x) : (p1y - p2y) / (p1x - p2x);
			const double b = p1y + 0.5 - (p1x + 0.5) * y_slope;

			for (xd = x_start; xd <= x_end; xd += line_step) {

				const double yd = y_slope * xd + b;
				const int xi = (int)xd;
				const int yi = (int)yd;

				// Clip samples that wander out of bounds
				if (yi < 0 || yi > dims[1] - 1)
					continue;

				SIFT3D_IM_GET_VOX(out, xi, yi, zi, 0) = 1.0f;
			}
		}
	}

	// Clean up
	cleanup_Mat_rm(&points1_d);
	cleanup_Mat_rm(&points2_d);
	return SIFT3D_SUCCESS;

draw_lines_quit:
	cleanup_Mat_rm(&points1_d);
	cleanup_Mat_rm(&points2_d);
	return SIFT3D_FAILURE;
}
/* Detect the format of the supplied file name. Existing directories are
 * classified as DIRECTORY; everything else is classified by extension,
 * falling back to UNKNOWN. */
im_format im_get_format(const char *path) {

        struct stat st;
        const char *ext;

        /* A path that exists and is a directory needs no extension check */
        if (stat(path, &st) == 0 && S_ISDIR(st.st_mode))
                return DIRECTORY;

        /* Otherwise classify by the file extension */
        ext = get_file_ext(path);

        if (strcmp(ext, ext_analyze) == 0 || strcmp(ext, ext_gz) == 0 ||
            strcmp(ext, ext_nii) == 0)
                return NIFTI;
        if (strcmp(ext, ext_dcm) == 0)
                return DICOM;
        if (strcmp(ext, ext_dir) == 0)
                return DIRECTORY;

        /* The type was not recognized */
        return UNKNOWN;
}
/* Read an image from a file. The file extension must match one of the
 * supported formats.
 *
 * Supported formats:
 * - Analyze (.img, .img.gz)
 * - DICOM (.dcm)
 * - Directory of DICOM files
 * - NIFTI-1 (.nii, .nii.gz)
 *
 * Return values:
 * -SIFT3D_SUCCESS - Image successfully read
 * -SIFT3D_FILE_DOES_NOT_EXIST - The file does not exist
 * -SIFT3D_UNSUPPORTED_FILE_TYPE - The file type is not supported
 * -SIFT3D_WRAPPER_NOT_COMPILED - The file type is supported, but the wrapper
 *      library was not compiled.
 * -SIFT3D_UNEVEN_SPACING - The image slices are unevenly spaced.
 * -SIFT3D_INCONSISTENT_AXES - The image slices have inconsistent axes.
 * -SIFT3D_DUPLICATE_SLICES - There are multiple slices in the same location.
 * -SIFT3D_FAILURE - Other error
 */
int im_read(const char *path, Image *const im) {

        struct stat st;

        /* The file must exist before any format dispatch */
        if (stat(path, &st) != 0) {
                SIFT3D_ERR("im_read: failed to find file %s \n", path);
                return SIFT3D_FILE_DOES_NOT_EXIST;
        }

        /* Dispatch to the reader for the detected format */
        switch (im_get_format(path)) {
        case ANALYZE:
        case NIFTI:
                return read_nii(path, im);
        case DICOM:
                return read_dcm(path, im);
        case DIRECTORY:
                return read_dcm_dir(path, im);
        case FILE_ERROR:
                return SIFT3D_FAILURE;
        case UNKNOWN:
        default:
                SIFT3D_ERR("im_read: unrecognized file extension "
                           "from file %s \n", path);
                return SIFT3D_UNSUPPORTED_FILE_TYPE;
        }
}
/* Write an image to a file.
*
* Supported formats:
* -DICOM (.dcm)
* -Directory of DICOM files
* -NIFTI (.nii, .nii.gz)
*
* Return values:
* -SIFT3D_SUCCESS - Successfully wrote the image
* -SIFT3D_UNSUPPORTED_FILE_TYPE - Cannot write this file type
* -SIFT3D_FAILURE - Other error
*/
int im_write(const char *path, const Image *const im) {
// Create the path
if (mkpath(path, out_mode))
return SIFT3D_FAILURE;
// Get the file format
switch (im_get_format(path)) {
case ANALYZE:
case NIFTI:
return write_nii(path, im);
case DICOM:
return write_dcm(path, im, NULL, -1.0f);
case DIRECTORY:
// Create the directory
if (do_mkdir(path, out_mode)) {
SIFT3D_ERR("im_write: failed to create directory "
"%s \n", path);
return SIFT3D_FAILURE;
}
return write_dcm_dir(path, im, NULL);
case UNKNOWN:
default:
// Otherwise, the file extension was not found
SIFT3D_ERR("im_write: unrecognized file extension "
"from file %s \n", path);
return SIFT3D_UNSUPPORTED_FILE_TYPE;
}
// Unreachable code
return SIFT3D_FAILURE;
}
/* Separate the file name component from its path.
 *
 * NOTE(review): when a separator is found, the returned pointer still begins
 * WITH that separator (e.g. "/file.txt"), because strrchr points at the
 * separator itself. im_get_parent_dir and get_file_ext depend on this exact
 * offset -- confirm before changing this to name + 1. */
static const char *get_file_name(const char *path) {

	const char *name;

	// Get the last file separator
	name = strrchr(path, SIFT3D_FILE_SEP);

	// A path with no separator is already a bare file name
	return name == NULL ? path : name;
}
/* Get the extension of a file name. Returns the text after the final dot of
 * the file name component, or "" when there is no dot or the dot is the
 * first character of the name. */
static const char *get_file_ext(const char *name)
{
        const char *last_dot;

        /* Restrict the search to the file name component */
        name = get_file_name(name);

        /* A missing dot, or a dot at the very start of the name, means
         * there is no extension */
        last_dot = strrchr(name, '.');
        if (last_dot == NULL || last_dot == name)
                return "";

        return last_dot + 1;
}
/* Get the parent directory of a file. The returned string must later be
 * freed. Returns NULL on allocation failure. */
char *im_get_parent_dir(const char *path) {

	ptrdiff_t file_pos;
	char *dirName;

	/* Duplicate the path so we can edit it.
	 * BUG FIX: a failed strndup was previously dereferenced below. */
	if ((dirName = strndup(path, FILENAME_MAX)) == NULL)
		return NULL;

	// Truncate the copy just before the file name component
	file_pos = get_file_name(path) - path;
	if (file_pos > 0)
		dirName[file_pos] = '\0';

	return dirName;
}
/* Write a matrix to a .csv or .csv.gz file.
 * A ".gz" extension selects gzip output; exactly one of the gz or FILE
 * handles is opened, chosen by the compress flag everywhere below.
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int write_Mat_rm(const char *path, const Mat_rm * const mat)
{
	FILE *file;
	gzFile gz;
	const char *ext;
	int i, j, compress;

	const char *mode = "w";

	// Validate and create the output directory
	if (mkpath(path, out_mode))
		return SIFT3D_FAILURE;

	// Get the file extension
	ext = get_file_ext(path);

	// Check if we need to compress the file
	compress = strcmp(ext, ext_gz) == 0;

	// Open the file with whichever API matches the compression mode
	if (compress) {
		if ((gz = gzopen(path, mode)) == Z_NULL)
			return SIFT3D_FAILURE;
	} else {
		if ((file = fopen(path, mode)) == NULL)
			return SIFT3D_FAILURE;
	}

	/* Emit one element per cell with the given conversion, commas
	 * between columns and a newline after each row */
#define WRITE_MAT(mat, format, type) \
	SIFT3D_MAT_RM_LOOP_START(mat, i, j) \
	const char delim = j < mat->num_cols - 1 ? ',' : '\n'; \
	if (compress) { \
		gzprintf(gz, format, SIFT3D_MAT_RM_GET(mat, i, j, \
			type)); \
		gzputc(gz, delim); \
	} else { \
		fprintf(file, format, SIFT3D_MAT_RM_GET(mat, i, j, \
			type)); \
		fputc(delim, file); \
	} \
	SIFT3D_MAT_RM_LOOP_END

	// Write the matrix, dispatching on the element type
	switch (mat->type) {
	case SIFT3D_DOUBLE:
		WRITE_MAT(mat, "%f", double);
		break;
	case SIFT3D_FLOAT:
		WRITE_MAT(mat, "%f", float);
		break;
	case SIFT3D_INT:
		WRITE_MAT(mat, "%d", int);
		break;
	default:
		goto write_mat_quit;
	}
#undef WRITE_MAT

	// Check for errors and finish writing the matrix
	if (compress) {
		if (gzclose(gz) != Z_OK)
			goto write_mat_quit;
	} else {
		if (ferror(file))
			goto write_mat_quit;
		fclose(file);
	}

	return SIFT3D_SUCCESS;

write_mat_quit:
	// Close whichever handle was opened above
	if (compress) {
		gzclose(gz);
	} else {
		fclose(file);
	}
	return SIFT3D_FAILURE;
}
/* Shortcut to initialize an image for first-time use.
 * Allocates memory, and assumes the default stride. This
 * function calls init_im and initializes all values to 0.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int init_im_with_dims(Image *const im, const int nx, const int ny, const int nz,
		      const int nc)
{
	// Reset the image struct to a clean state
	init_im(im);

	// Record the requested dimensions and channel count
	im->nx = nx;
	im->ny = ny;
	im->nz = nz;
	im->nc = nc;

	// Derive the canonical strides, then allocate and clear the data
	im_default_stride(im);
	if (im_resize(im))
		return SIFT3D_FAILURE;
	im_zero(im);

	return SIFT3D_SUCCESS;
}
/* Calculate the strides of an image object in the default
 * manner. The following parameters must be initialized:
 * -nx
 * -ny
 * -nz
 * -nc
 * If a dimension is not used, its size should be set
 * to 1. */
void im_default_stride(Image *const im)
{
        int i;

        /* The channel dimension is the fastest-varying one */
        size_t acc = (size_t) im->nc;
        SIFT3D_IM_GET_STRIDES(im)[0] = acc;

        /* Each subsequent stride is the running product of the sizes of
         * all faster-varying dimensions */
        for (i = 1; i < IM_NDIMS; i++) {
                acc *= SIFT3D_IM_GET_DIMS(im)[i - 1];
                SIFT3D_IM_GET_STRIDES(im)[i] = acc;
        }
}
/* Pads an image to a new size. Prior to calling this function, initialize
 * pad with all dimensions and strides, as in im_resize. Other metadata,
 * such as units, will be copied from im to pad. */
int im_pad(const Image * const im, Image * const pad)
{
	int x, y, z, c;

	// Final voxel indices of the output
	const int pad_x_end = pad->nx - 1;
	const int pad_y_end = pad->ny - 1;
	const int pad_z_end = pad->nz - 1;

	// Extent of the copied data: the overlap of im and pad
	const int data_x_end = SIFT3D_MIN(im->nx - 1, pad_x_end);
	const int data_y_end = SIFT3D_MIN(im->ny - 1, pad_y_end);
	const int data_z_end = SIFT3D_MIN(im->nz - 1, pad_z_end);

	// Copy relevant metadata, omitting dimensions and strides
	memcpy(SIFT3D_IM_GET_UNITS(pad), SIFT3D_IM_GET_UNITS(im),
	       IM_NDIMS * sizeof(double));

	// Resize the output
	if (im_resize(pad))
		return SIFT3D_FAILURE;

	// Copy the overlapping region of the image
	SIFT3D_IM_LOOP_LIMITED_START_C(im, x, y, z, c, 0, data_x_end, 0,
				       data_y_end, 0, data_z_end)
	SIFT3D_IM_GET_VOX(pad, x, y, z, c) =
	    SIFT3D_IM_GET_VOX(im, x, y, z, c);
	SIFT3D_IM_LOOP_END_C
	/* Pad the remaining data with zeros.
	 * NOTE(review): this single loop spans [data_end, pad_end] in all
	 * three axes simultaneously, which appears to cover only the
	 * "corner" region -- slabs where only some axes exceed the data
	 * extent may be left uninitialized. Confirm against the loop macro
	 * and im_resize's allocation behavior. */
	SIFT3D_IM_LOOP_LIMITED_START_C(im, x, y, z, c, data_x_end,
				       pad_x_end, data_y_end, pad_y_end,
				       data_z_end, pad_z_end)
	SIFT3D_IM_GET_VOX(pad, x, y, z, c) = 0.0f;
	SIFT3D_IM_LOOP_END_C return SIFT3D_SUCCESS;
}
/* Resize an image according to the current nx, ny,
 * and nz. Does not modify scale space information or
 * strides. Prior to calling this function, use init_im(im)
 * and initialize the following fields:
 *   -nx, -ny, -nz, -nc
 *   -xs, -ys, -zs (can be set by im_default_stride(im))
 *
 * All of this initialization can also be done with
 * init_im_with_dims(), which calls this function.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise.
 */
int im_resize(Image *const im)
{
    int i;

    //FIXME: This will not work for strange strides
    const size_t size = im->nx * im->ny * im->nz * im->nc;

    // Verify inputs: every spatial dimension must be positive
    for (i = 0; i < IM_NDIMS; i++) {
        const int dim = SIFT3D_IM_GET_DIMS(im)[i];
        if (dim > 0)
            continue;
        SIFT3D_ERR("im_resize: invalid dimension %d: %d \n", i,
                   dim);
        return SIFT3D_FAILURE;
    }
    if (im->nc < 1) {
        SIFT3D_ERR("im_resize: invalid number of channels: %d \n",
                   im->nc);
        return SIFT3D_FAILURE;
    }

    // Do nothing if the size has not changed
    if (im->size == size)
        return SIFT3D_SUCCESS;
    im->size = size;

    // Allocate new memory
    im->data = SIFT3D_safe_realloc(im->data, size * sizeof(float));

#ifdef SIFT3D_USE_OPENCL
    {
        cl_int err;
        int initialized;

        if (cl_data.valid) {
            // NOTE(review): im->data was reallocated above, so this
            // tests the NEW pointer, not whether an old image existed —
            // confirm the intended check.
            initialized = (im->data != NULL);

            // Destroy the old image
            if (initialized && im->cl_valid)
                clReleaseMemObject(im->cl_image);

            // Init an OpenCL image.
            // BUGFIX: the 2D/3D branches were swapped — a volume with
            // nz > 0 must use clCreateImage3D (which takes depth and a
            // slice pitch); the 2D call belongs to the degenerate case.
            if (im->nz > 0) {
                im->cl_image = clCreateImage3D(cl_data.context,
                                               cl_data.
                                               mem_flags,
                                               &cl_data.
                                               image_format,
                                               im->nx, im->ny,
                                               im->nz,
                                               im->ys,
                                               im->zs,
                                               im->data, &err);
            } else {
                im->cl_image = clCreateImage2D(cl_data.context,
                                               cl_data.
                                               mem_flags,
                                               &cl_data.
                                               image_format,
                                               im->nx, im->ny,
                                               im->ys,
                                               im->data, &err);
            }
            if (err != CL_SUCCESS) {
                im->cl_valid = SIFT3D_FALSE;
                return SIFT3D_FAILURE;
            }
            im->cl_valid = SIFT3D_TRUE;
        }
    }
#endif
    // Fail only if a nonzero allocation came back NULL
    return size != 0 && im->data == NULL ? SIFT3D_FAILURE : SIFT3D_SUCCESS;
}
/* Concatenate two images in dimension dim, so that src1 comes before src2.
 * For example, if dim == 0, the images are horizontally concatenated in x,
 * so that src1 is on the left and src2 is on the right. Resizes dst.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int im_concat(const Image * const src1, const Image * const src2, const int dim,
              Image * const dst)
{
    int off[IM_NDIMS], dims_out[IM_NDIMS];
    int i, x, y, z, c;

    const int nc = src1->nc;

    // Verify inputs: every dimension except dim must match
    for (i = 0; i < IM_NDIMS; i++) {
        const int src1d = SIFT3D_IM_GET_DIMS(src1)[i];
        const int src2d = SIFT3D_IM_GET_DIMS(src2)[i];
        if (i == dim)
            continue;
        if (src1d != src2d) {
            SIFT3D_ERR("im_concat: dimension %d must be "
                       "equal in input images. src1: %d src2: %d \n",
                       i, src1d, src2d);
            return SIFT3D_FAILURE;
        }
    }
    if (src1->nc != src2->nc) {
        SIFT3D_ERR("im_concat: images must have an equal number "
                   "of channels. src1: %d src2: %d \n", src1->nc,
                   src2->nc);
        return SIFT3D_FAILURE;
    }

    // Get the output dimensions and offsets: only dim grows, and src2's
    // voxels are shifted by src1's extent in that dimension
    for (i = 0; i < IM_NDIMS; i++) {
        const int src1d = SIFT3D_IM_GET_DIMS(src1)[i];
        const int src2d = SIFT3D_IM_GET_DIMS(src2)[i];
        dims_out[i] = dim == i ? src1d + src2d : src1d;
        off[i] = dim == i ? src1d : 0;
    }

    // Resize dst
    memcpy(SIFT3D_IM_GET_DIMS(dst), dims_out, IM_NDIMS * sizeof(int));
    dst->nc = nc;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Copy the data from src1 (occupies the low-index region of dst)
    SIFT3D_IM_LOOP_START_C(src1, x, y, z, c)
        SIFT3D_IM_GET_VOX(dst, x, y, z, c) =
            SIFT3D_IM_GET_VOX(src1, x, y, z, c);
    SIFT3D_IM_LOOP_END_C

    // Copy the data from src2
    SIFT3D_IM_LOOP_START_C(src2, x, y, z, c)
        // Get the destination coordinates with offsets
        const int x_dst = x + off[0];
        const int y_dst = y + off[1];
        const int z_dst = z + off[2];

        // Copy the data from src2
        SIFT3D_IM_GET_VOX(dst, x_dst, y_dst, z_dst, c) =
            SIFT3D_IM_GET_VOX(src2, x, y, z, c);
    SIFT3D_IM_LOOP_END_C

    return SIFT3D_SUCCESS;
}
/* Upsample an image by a factor of 2 in each dimension, using box
 * averaging of the 2x2x2 source neighborhood. This function resizes dst
 * and halves its units per dimension. */
int im_upsample_2x(const Image *const src, Image *const dst)
{
    double units[IM_NDIMS];
    int dims[IM_NDIMS];
    int i, x, y, z, c, sx, sy, sz;

    const int nc = src->nc;
    const int w = 2;
    // Normalization so the w^IM_NDIMS accumulated samples average to 1
    const float weight = (float) (1.0 / pow((double) w, IM_NDIMS));

    // Double the dimensions, halve the physical units
    for (i = 0; i < IM_NDIMS; i++) {
        dims[i] = SIFT3D_IM_GET_DIMS(src)[i] * 2;
        units[i] = SIFT3D_IM_GET_UNITS(src)[i] / 2.0;
    }
    memcpy(SIFT3D_IM_GET_DIMS(dst), dims, IM_NDIMS * sizeof(int));
    // BUGFIX: units are doubles; the old code copied with sizeof(float),
    // transferring only half of the bytes.
    memcpy(SIFT3D_IM_GET_UNITS(dst), units, IM_NDIMS * sizeof(double));
    dst->nc = nc;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // TODO: 3-pass (separable) upsample might be faster

    // Upsample: each output voxel averages a w-wide source window
    SIFT3D_IM_LOOP_START_C(dst, x, y, z, c)
        const int sx_start = x >> 1;
        const int sy_start = y >> 1;
        const int sz_start = z >> 1;
        const int sx_end = sx_start + w - 1;
        const int sy_end = sy_start + w - 1;
        const int sz_end = sz_start + w - 1;

        SIFT3D_IM_GET_VOX(dst, x, y, z, c) = 0;
        SIFT3D_IM_LOOP_LIMITED_START(dst, sx, sy, sz, sx_start,
                                     sx_end, sy_start, sy_end,
                                     sz_start, sz_end)
            SIFT3D_IM_GET_VOX(dst, x, y, z, c) +=
                SIFT3D_IM_GET_VOX(src, sx, sy, sz, c);
        SIFT3D_IM_LOOP_END

        SIFT3D_IM_GET_VOX(dst, x, y, z, c) *= weight;
    SIFT3D_IM_LOOP_END_C

    return SIFT3D_SUCCESS;
}
/* Downsample an image by a factor of 2 in each dimension.
 * This function initializes dst with the proper dimensions and allocates
 * memory. No smoothing is applied; every second sample is kept. */
int im_downsample_2x(const Image *const src, Image *const dst)
{
    int xd, yd, zd, cd;

    // Output geometry: half the input, rounded down
    dst->nx = (int)floor((double)src->nx / 2.0);
    dst->ny = (int)floor((double)src->ny / 2.0);
    dst->nz = (int)floor((double)src->nz / 2.0);
    dst->nc = src->nc;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Decimate: output (x, y, z) reads source (2x, 2y, 2z)
    SIFT3D_IM_LOOP_START_C(dst, xd, yd, zd, cd)
        const int src_x = xd << 1;
        const int src_y = yd << 1;
        const int src_z = zd << 1;

        SIFT3D_IM_GET_VOX(dst, xd, yd, zd, cd) =
            SIFT3D_IM_GET_VOX(src, src_x, src_y, src_z, cd);
    SIFT3D_IM_LOOP_END_C

    return SIFT3D_SUCCESS;
}
/* Same as im_downsample_2x, but with OpenCL acceleration. This function DOES NOT
 * read the results back dst->data. Use im_read_back for that.
 *
 * Requires even source dimensions. Returns the clEnqueueNDRangeKernel
 * error code on the OpenCL path, or SIFT3D_FAILURE when compiled without
 * OpenCL. */
int im_downsample_2x_cl(Image * src, Image * dst)
{
#ifdef SIFT3D_USE_OPENCL
    size_t global_work_size[3];
    cl_int err, dim;
    cl_kernel kernel;

    // Verify image dimensions: the kernel assumes exact halving
    if (src->nx % 2 != 0 || src->ny % 2 != 0 || src->nz % 2 != 0)
        return SIFT3D_FAILURE;

    // Initialize dst dimensions, resized in im_set_kernel_arg
    dst->nx = src->nx / 2;
    dst->ny = src->ny / 2;
    dst->nz = src->nz / 2;
    dst->nc = src->nc;
    im_default_stride(dst);

    // Do not have a 2D kernel right now
    assert(src->nz > 0);
    dim = 3;
    // One work-item per output voxel
    global_work_size[0] = dst->nx;
    global_work_size[1] = dst->ny;
    global_work_size[2] = dst->nz;
    kernel = cl_data.kernels.downsample_2x_3d;
    im_set_kernel_arg(kernel, 0, src);
    im_set_kernel_arg(kernel, 1, dst);
    err =
        clEnqueueNDRangeKernel(cl_data.queues[0], kernel, dim, NULL,
                               global_work_size, NULL, 0, NULL, NULL);
    return (int)err;
#else
    printf
        ("im_downsample_2x_cl: This version was not compiled with OpenCL!\n");
    return SIFT3D_FAILURE;
#endif
}
/* Loads the C-accessible data of an image into its OpenCL data. If blocking is set,
 * block the function until the load is complete.
 *
 * Returns the clEnqueueWriteImage error code on the OpenCL path, or
 * SIFT3D_FAILURE when compiled without OpenCL. */
int im_load_cl(Image * im, int blocking)
{
#ifdef SIFT3D_USE_OPENCL
    // Transfer the whole volume; ys/zs supply the row and slice pitches
    const size_t origin[] = { 0, 0, 0 };
    const size_t region[] = { im->nx, im->ny, im->nz };
    const cl_bool cl_blocking = (blocking) ? CL_TRUE : CL_FALSE;
    return clEnqueueWriteImage(cl_data.queues[0], im->cl_image, cl_blocking,
                               origin, region, im->ys, im->zs,
                               im->data, 0, NULL, NULL);
#else
    printf("im_load_cl: This version was not compiled with OpenCL!\n");
    return SIFT3D_FAILURE;
#endif
}
/* Reads the OpenCL data of an image back to its C-accessible data. If blocking is set,
 * block the function until the read is complete.
 *
 * Returns the clEnqueueReadImage error code on the OpenCL path, or
 * SIFT3D_FAILURE when compiled without OpenCL. */
int im_read_back(Image * im, int blocking)
{
#ifdef SIFT3D_USE_OPENCL
    // Transfer the whole volume; ys/zs supply the row and slice pitches
    const size_t origin[] = { 0, 0, 0 };
    const size_t region[] = { im->nx, im->ny, im->nz };
    const cl_bool cl_blocking = (blocking) ? CL_TRUE : CL_FALSE;
    return clEnqueueReadImage(cl_data.queues[0], im->cl_image, cl_blocking,
                              origin, region, im->ys, im->zs,
                              im->data, 0, NULL, NULL);
#else
    printf("im_read_back: This version was not compiled with OpenCL!\n");
    return SIFT3D_FAILURE;
#endif
}
/* Updates an Image struct's OpenCL data, if neccessary, and sets it as argument n
 * in the provided kernel.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise (or always
 * SIFT3D_FAILURE when compiled without OpenCL). */
int im_set_kernel_arg(cl_kernel kernel, int n, Image * im)
{
#ifdef SIFT3D_USE_OPENCL
    cl_int err;

    // (Re)create the OpenCL image if it is stale; im_resize also rebuilds
    // the cl_image when cl_data is valid
    if (!im->cl_valid && im_resize(im))
        return SIFT3D_FAILURE;
    err = clSetKernelArg(kernel, n, sizeof(cl_mem), &im->cl_image);
    check_cl_error(err, "im_set_kernel_arg");
    return SIFT3D_SUCCESS;
#else
    printf
        ("im_set_kernel_arg: This version was not compiled with OpenCL!\n");
    return SIFT3D_FAILURE;
#endif
}
/* Copy an image's dimensions, strides, channel count and units into
 * another, then resize dst accordingly. This function resizes dst.
 *
 * @param src The source image.
 * @param dst The destination image.
 * @return Returns SIFT3D_SUCCESS or SIFT3D_FAILURE.
 */
int im_copy_dims(const Image * const src, Image * dst)
{
    // Refuse to mirror an unallocated image
    if (src->data == NULL)
        return SIFT3D_FAILURE;

    // Dimensions
    dst->nx = src->nx;
    dst->ny = src->ny;
    dst->nz = src->nz;
    dst->nc = src->nc;

    // Strides
    dst->xs = src->xs;
    dst->ys = src->ys;
    dst->zs = src->zs;

    // Physical units
    dst->ux = src->ux;
    dst->uy = src->uy;
    dst->uz = src->uz;

    // Allocate dst to match
    return im_resize(dst);
}
/* Copy an image's data into another. This function changes the dimensions
 * and stride of dst, and allocates memory. Copying an image onto itself
 * is a no-op. */
int im_copy_data(const Image * const src, Image * const dst)
{
    int xi, yi, zi, ci;

    // Nothing to copy from an unallocated image
    if (src->data == NULL)
        return SIFT3D_FAILURE;

    // Aliased buffers: already identical
    if (dst->data == src->data)
        return SIFT3D_SUCCESS;

    // Mirror the geometry and allocate
    if (im_copy_dims(src, dst))
        return SIFT3D_FAILURE;

    // Voxel-wise copy (strides may differ, so no flat memcpy)
    SIFT3D_IM_LOOP_START_C(dst, xi, yi, zi, ci)
        SIFT3D_IM_GET_VOX(dst, xi, yi, zi, ci) =
            SIFT3D_IM_GET_VOX(src, xi, yi, zi, ci);
    SIFT3D_IM_LOOP_END_C

    return SIFT3D_SUCCESS;
}
/* Clean up memory for an Image. Safe to call when im->data is NULL.
 * Resets the data pointer so a stale reference cannot be freed twice. */
void im_free(Image * im)
{
    // free(NULL) is a no-op, so no guard is needed
    free(im->data);
    // Defend against double-free / use-after-free through this struct
    im->data = NULL;
}
/* Make a deep copy of a single channel of an image. Resizes dst to a
 * single-channel image with src's spatial dimensions.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int im_channel(const Image * const src, Image * const dst,
               const unsigned int chan)
{
    int x, y, z;

    // NOTE(review): chan is narrowed from unsigned to int here; values
    // above INT_MAX would misbehave — presumably channel counts are tiny.
    const int c = chan;

    // Verify inputs
    if (c >= src->nc) {
        SIFT3D_ERR("im_channel: invalid channel: %d, image has "
                   "%d channels", c, src->nc);
        return SIFT3D_FAILURE;
    }

    // Resize the output: same spatial dims, one channel
    memcpy(SIFT3D_IM_GET_DIMS(dst), SIFT3D_IM_GET_DIMS(src),
           IM_NDIMS * sizeof(int));
    dst->nc = 1;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Copy the channel
    SIFT3D_IM_LOOP_START(dst, x, y, z)
        SIFT3D_IM_GET_VOX(dst, x, y, z, 0) =
            SIFT3D_IM_GET_VOX(src, x, y, z, c);
    SIFT3D_IM_LOOP_END
    return SIFT3D_SUCCESS;
}
/* Find the maximum absolute value of an image */
float im_max_abs(const Image *const im) {
float max;
int x, y, z, c;
max = 0.0f;
SIFT3D_IM_LOOP_START_C(im, x, y, z, c)
const float samp = fabsf(SIFT3D_IM_GET_VOX(im, x, y, z, c));
max = SIFT3D_MAX(max, samp);
SIFT3D_IM_LOOP_END_C
return max;
}
/* Scale an image to the [-1, 1] range, where the largest absolute value
 * is 1. Modifies the image in place; an all-zero image is left unchanged. */
void im_scale(const Image *const im)
{
    int xi, yi, zi, ci;

    const float norm = im_max_abs(im);

    // Avoid dividing by zero on an all-zero image
    if (norm == 0.0f)
        return;

    // Normalize every sample by the peak magnitude
    SIFT3D_IM_LOOP_START_C(im, xi, yi, zi, ci)
        SIFT3D_IM_GET_VOX(im, xi, yi, zi, ci) /= norm;
    SIFT3D_IM_LOOP_END_C
}
/* Subtract src2 from src1 voxel-wise, saving the result in dst.
 * The operands must agree in every dimension and channel count.
 * Resizes dst. */
int im_subtract(Image * src1, Image * src2, Image * dst)
{
    int xi, yi, zi, ci;

    // The operands must have identical geometry
    if (src1->nx != src2->nx ||
        src1->ny != src2->ny ||
        src1->nz != src2->nz || src1->nc != src2->nc)
        return SIFT3D_FAILURE;

    // Match dst to the operand geometry
    if (im_copy_dims(src1, dst))
        return SIFT3D_FAILURE;

    // Element-wise difference
    SIFT3D_IM_LOOP_START_C(dst, xi, yi, zi, ci)
        SIFT3D_IM_GET_VOX(dst, xi, yi, zi, ci) =
            SIFT3D_IM_GET_VOX(src1, xi, yi, zi, ci) -
            SIFT3D_IM_GET_VOX(src2, xi, yi, zi, ci);
    SIFT3D_IM_LOOP_END_C
    return SIFT3D_SUCCESS;
}
/* Zero an image: set every sample of every channel to 0.0f. */
void im_zero(Image * im)
{
    int xi, yi, zi, ci;

    SIFT3D_IM_LOOP_START_C(im, xi, yi, zi, ci)
        SIFT3D_IM_GET_VOX(im, xi, yi, zi, ci) = 0.0f;
    SIFT3D_IM_LOOP_END_C
}
/* Transform an image according to the inverse of the provided tform.
 *
 * Parameters:
 *   tform: The transformation.
 *   src: The input image.
 *   interp: The type of interpolation.
 *   resize: If true, resizes the dst to be the same size as src. Otherwise,
 *     uses the dimensions of dst.
 *   dst: The output image.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int im_inv_transform(const void *const tform, const Image * const src,
                     const interp_type interp, const int resize,
                     Image *const dst)
{
    int x, y, z, c;

    // Optionally resize the output image
    if (resize && im_copy_dims(src, dst))
        return SIFT3D_FAILURE;

// For each output voxel, map its coordinates through tform and sample
// src there with the chosen resampler (resample_linear / resample_lanczos2)
#define IMUTIL_RESAMPLE(arg) \
    SIFT3D_IM_LOOP_START(dst, x, y, z) \
    \
        double transx, transy, transz; \
    \
        apply_tform_xyz(tform, (double)x, (double)y, (double)z, \
                        &transx, &transy, &transz); \
    \
        for (c = 0; c < dst->nc; c++) { \
            SIFT3D_IM_GET_VOX(dst, x, y, z, c) = resample_ ## arg(src, \
                transx, transy, transz, c); \
        } \
    SIFT3D_IM_LOOP_END

    // Transform
    switch (interp) {
    case LINEAR:
        IMUTIL_RESAMPLE(linear)
        break;
    case LANCZOS2:
        IMUTIL_RESAMPLE(lanczos2)
        break;
    default:
        SIFT3D_ERR("im_inv_transform: unrecognized "
                   "interpolation type");
        return SIFT3D_FAILURE;
    }
// BUGFIX: the old code had "#undef RESAMPLE", which undefined a name that
// was never defined and leaked IMUTIL_RESAMPLE past this function.
#undef IMUTIL_RESAMPLE

    return SIFT3D_SUCCESS;
}
/* Helper routine for image transformation. Performs trilinear
 * interpolation over the 2x2x2 cell enclosing (x, y, z), setting
 * out-of-bounds voxels to zero. */
static double resample_linear(const Image * const in, const double x,
                              const double y, const double z, const int c)
{
    // Coordinates outside the volume sample as zero
    if (x < 0 || x > in->nx - 1 ||
        y < 0 || y > in->ny - 1 || z < 0 || z > in->nz - 1)
        return 0.0;

    // Corner indices of the enclosing cell (floor / ceil per axis)
    const int x0 = (int)floor(x);
    const int y0 = (int)floor(y);
    const int z0 = (int)floor(z);
    const int x1 = (int)ceil(x);
    const int y1 = (int)ceil(y);
    const int z1 = (int)ceil(z);

    // Fractional position within the cell
    const double wx = x - x0;
    const double wy = y - y0;
    const double wz = z - z0;

    // The eight corner samples; suffix bits give (x, y, z) with
    // 0 = floor corner, 1 = ceil corner
    const double v000 = SIFT3D_IM_GET_VOX(in, x0, y0, z0, c);
    const double v010 = SIFT3D_IM_GET_VOX(in, x0, y1, z0, c);
    const double v100 = SIFT3D_IM_GET_VOX(in, x1, y0, z0, c);
    const double v110 = SIFT3D_IM_GET_VOX(in, x1, y1, z0, c);
    const double v001 = SIFT3D_IM_GET_VOX(in, x0, y0, z1, c);
    const double v011 = SIFT3D_IM_GET_VOX(in, x0, y1, z1, c);
    const double v101 = SIFT3D_IM_GET_VOX(in, x1, y0, z1, c);
    const double v111 = SIFT3D_IM_GET_VOX(in, x1, y1, z1, c);

    // Weighted sum over the corners
    return v000 * (1.0 - wx) * (1.0 - wy) * (1.0 - wz)
        + v010 * (1.0 - wx) * wy * (1.0 - wz)
        + v100 * wx * (1.0 - wy) * (1.0 - wz)
        + v110 * wx * wy * (1.0 - wz)
        + v001 * (1.0 - wx) * (1.0 - wy) * wz
        + v011 * (1.0 - wx) * wy * wz
        + v101 * wx * (1.0 - wy) * wz
        + v111 * wx * wy * wz;
}
/* Helper routine to resample an image at a point, using the Lanczos kernel
 * with parameter a = 2. Out-of-bounds coordinates sample as zero. */
static double resample_lanczos2(const Image * const im, const double x,
                                const double y, const double z, const int c)
{
    double val;
    int xs, ys, zs;

    //TODO: faster separable implementation

    // Kernel parameter
    const double a = 2;

    // Check bounds
    const double xMin = 0;
    const double yMin = 0;
    const double zMin = 0;
    const double xMax = im->nx - 1;
    const double yMax = im->ny - 1;
    const double zMax = im->nz - 1;

    if (x < xMin || y < yMin || z < zMin ||
        x > xMax || y > yMax || z > zMax)
        return 0.0;

    // Window of support, clipped to the image bounds
    const int x_start = SIFT3D_MAX(floor(x) - a, xMin);
    const int x_end = SIFT3D_MIN(floor(x) + a, xMax);
    const int y_start = SIFT3D_MAX(floor(y) - a, yMin);
    const int y_end = SIFT3D_MIN(floor(y) + a, yMax);
    const int z_start = SIFT3D_MAX(floor(z) - a, zMin);
    const int z_end = SIFT3D_MIN(floor(z) + a, zMax);

    // Iterate through the window.
    // BUGFIX: the loop previously referenced "in", which is not a
    // parameter of this function; the image argument is "im".
    val = 0.0;
    SIFT3D_IM_LOOP_LIMITED_START(im, xs, ys, zs, x_start, x_end, y_start,
                                 y_end, z_start, z_end)

        // Evaluate the kernel (DBL_EPSILON avoids the 0/0 singularity
        // of lanczos at distance 0)
        const double xw = fabs((double)xs - x) + DBL_EPSILON;
        const double yw = fabs((double)ys - y) + DBL_EPSILON;
        const double zw = fabs((double)zs - z) + DBL_EPSILON;
        const double kernel = lanczos(xw, a) * lanczos(yw, a) * lanczos(zw, a);

        // Accumulate
        val += kernel * SIFT3D_IM_GET_VOX(im, xs, ys, zs, c);
    SIFT3D_IM_LOOP_END
    return val;
}
/* Lanczos kernel function */
static double lanczos(double x, double a)
{
const double pi_x = M_PI * x;
return a * sin(pi_x) * sin(pi_x / a) / (pi_x * pi_x);
}
/* Resample an image to different units.
 *
 * Parameters:
 *   src: The input image.
 *   units: The new units.
 *   interp: The type of interpolation to use.
 *   dst: The output image.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int im_resample(const Image *const src, const double *const units,
                const interp_type interp, Image *const dst) {

    Affine aff;
    Mat_rm A;
    double factors[IM_NDIMS];
    int i;

    // Initialize intermediates (cleaned up on every exit path below)
    if (init_Mat_rm(&A, IM_NDIMS, IM_NDIMS + 1, SIFT3D_DOUBLE,
                    SIFT3D_TRUE) ||
        init_Affine(&aff, IM_NDIMS))
        return SIFT3D_FAILURE;

    // Compute the scaling factors: old units over new units per dimension
    for (i = 0; i < IM_NDIMS; i++) {
        factors[i] = SIFT3D_IM_GET_UNITS(src)[i] / units[i];
    }

    // Set the transformation matrix: the tform maps dst coordinates back
    // to src, so the diagonal holds the inverse scale factors
    for (i = 0; i < IM_NDIMS; i++) {
        SIFT3D_MAT_RM_GET(&A, i, i, double) = 1.0 / factors[i];
    }
    if (Affine_set_mat(&A, &aff))
        goto im_resample_quit;

    // Set the output dimensions, rounding up
    dst->nc = src->nc;
    for (i = 0; i < IM_NDIMS; i++) {
        SIFT3D_IM_GET_DIMS(dst)[i] = (int) ceil((double)
            SIFT3D_IM_GET_DIMS(src)[i] * factors[i]);
    }
    im_default_stride(dst);
    if (im_resize(dst))
        goto im_resample_quit;

    // Apply the transformation (resize == SIFT3D_FALSE keeps the
    // dimensions just computed)
    if (im_inv_transform(&aff, src, interp, SIFT3D_FALSE, dst))
        goto im_resample_quit;

    // Set the new output units
    memcpy(SIFT3D_IM_GET_UNITS(dst), units, IM_NDIMS * sizeof(double));

    // Clean up
    cleanup_tform(&aff);
    cleanup_Mat_rm(&A);
    return SIFT3D_SUCCESS;

im_resample_quit:
    cleanup_tform(&aff);
    cleanup_Mat_rm(&A);
    return SIFT3D_FAILURE;
}
/* Horizontally convolves a separable filter with an image,
 * on CPU. Currently only works in 3D.
 *
 * This function chooses among the best variant of convolve_sep* based on
 * compilation options and filter parameters.
 *
 * Parameters:
 *   src - input image (initialized)
 *   dst - output image (initialized)
 *   f - filter to be applied
 *   dim - dimension in which to convolve
 *   unit - the spacing of the filter coefficients
 */
static int convolve_sep(const Image * const src,
                        Image * const dst, const Sep_FIR_filter * const f,
                        const int dim, const double unit) {
#ifdef SIFT3D_USE_OPENCL
    return convolve_sep_cl(src, dst, f, dim, unit);
#else
    // Symmetric filters get the specialized variant
    if (f->symmetric)
        return convolve_sep_sym(src, dst, f, dim, unit);
    return convolve_sep_gen(src, dst, f, dim, unit);
#endif
}
/* Convolve_sep for general filters. Processes interior voxels in a first
 * pass, then handles boundary voxels with mirrored coordinates in a
 * second pass. Resizes dst with the default stride. */
static int convolve_sep_gen(const Image * const src,
                            Image * const dst, const Sep_FIR_filter * const f,
                            const int dim, const double unit)
{
    register int x, y, z, c, d;
    register const int half_width = f->width / 2;
    register const int nx = src->nx;
    register const int ny = src->ny;
    register const int nz = src->nz;
    register const float conv_eps = 0.1f;
    register const int dim_end = SIFT3D_IM_GET_DIMS(src)[dim] - 1;
    // Ratio between the filter's coefficient spacing and the image units
    register const float unit_factor = unit /
        SIFT3D_IM_GET_UNITS(src)[dim];
    register const int unit_half_width =
        (int) ceilf(half_width * unit_factor);

    int start[] = {0, 0, 0};
    int end[] = {nx - 1, ny - 1, nz - 1};

    // Compute starting and ending points for the convolution dimension
    start[dim] += unit_half_width;
    end[dim] -= unit_half_width + 1;

    //TODO: Convert this to convolve_x, which only convolves in x,
    // then make a wrapper to restride, transpose, convolve x, and transpose
    // back

    // Resize the output, with the default stride
    if (im_copy_dims(src, dst))
        return SIFT3D_FAILURE;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Initialize the output to zeros
    im_zero(dst);

// Accumulate one linearly-interpolated sample of src, weighted by tap,
// into the output voxel at (x, y, z, c)
#define SAMP_AND_ACC(src, dst, tap, coords, c) \
    { \
        float frac; \
    \
        const int idx_lo[] = {(coords)[0], (coords)[1], (coords)[2]}; \
        int idx_hi[] = {idx_lo[0], idx_lo[1], idx_lo[2]}; \
    \
        /* Convert the physical coordinates to integer indices*/ \
        idx_hi[dim] += 1; \
        frac = (coords)[dim] - (float) idx_lo[dim]; \
    \
        /* Sample with linear interpolation */ \
        SIFT3D_IM_GET_VOX(dst, x, y, z, c) += (tap) * \
            ((1.0f - frac) * \
            SIFT3D_IM_GET_VOX(src, idx_lo[0], idx_lo[1], idx_lo[2], c) + \
            frac * \
            SIFT3D_IM_GET_VOX(src, idx_hi[0], idx_hi[1], idx_hi[2], c)); \
    }

    // First pass: process the interior.
    // BUGFIX: d must be private — it is function-scope and written by
    // every thread, which was a data race.
#pragma omp parallel for private(x) private(y) private(c) private(d)
    SIFT3D_IM_LOOP_LIMITED_START_C(dst, x, y, z, c, start[0], end[0],
                                   start[1], end[1], start[2], end[2])

        float coords[] = { x, y, z };

        for (d = -half_width; d <= half_width; d++) {

            const float tap = f->kernel[d + half_width];
            const float step = d * unit_factor;

            // Adjust the sampling coordinates
            coords[dim] -= step;

            // Sample
            SAMP_AND_ACC(src, dst, tap, coords, c);

            // Reset the sampling coordinates
            coords[dim] += step;
        }
    SIFT3D_IM_LOOP_END_C

    // Second pass: process the boundaries (BUGFIX: private(d), as above)
#pragma omp parallel for private(x) private(y) private(c) private(d)
    SIFT3D_IM_LOOP_START_C(dst, x, y, z, c)

        const int i_coords[] = { x, y, z };

        // Skip pixels we have already processed
        if (i_coords[dim] >= start[dim] && i_coords[dim] <= end[dim])
            continue;

        // Process the boundary pixel
        for (d = -half_width; d <= half_width; d++) {

            float coords[] = { x, y, z };

            const float tap = f->kernel[d + half_width];
            const float step = d * unit_factor;

            // Adjust the sampling coordinates
            coords[dim] -= step;

            // Mirror coordinates that fall outside the volume
            if ((int) coords[dim] < 0) {
                coords[dim] = -coords[dim];
                assert((int) coords[dim] >= 0);
            } else if ((int) coords[dim] >= dim_end) {
                coords[dim] = 2.0f * dim_end - coords[dim] -
                    conv_eps;
                assert((int) coords[dim] < dim_end);
            }

            // Sample
            SAMP_AND_ACC(src, dst, tap, coords, c);
        }
    SIFT3D_IM_LOOP_END_C

#undef SAMP_AND_ACC

    return SIFT3D_SUCCESS;
}
/* Same as convolve_sep, but with OpenCL acceleration. This does NOT
 * read back the results to C-accessible data. Use im_read_back for that. */
SIFT3D_IGNORE_UNUSED
static int convolve_sep_cl(const Image * const src, Image * const dst,
                           const Sep_FIR_filter * const f, const int dim,
                           const double unit)
{
#ifdef SIFT3D_USE_OPENCL
    cl_kernel kernel;
    cl_int dx, dy, dz, err;

    const size_t global_work_size[] = { src->nx, src->ny, src->nz };

    // Resize the output, with the default stride
    if (im_copy_dims(src, dst))
        return SIFT3D_FAILURE;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Do not have a 2D kernel right now.
    // BUGFIX: the old code read "return SIFT3D_FAILURE}" — missing
    // semicolon and fused brace, a syntax error on this path.
    // NOTE(review): this rejects dim != 3 while the switch below handles
    // dims 0-2 — confirm which argument convention is intended.
    if (dim != 3) {
        printf("convolve_sep_cl: unsupported dimension: %d \n", dim);
        return SIFT3D_FAILURE;
    }

    // Form the dimension offsets
    dx = dy = dz = 0;
    switch (dim) {
    case 0:
        dx = 1;
        break;
    case 1:
        dy = 1;
        break;
    case 2:
        dz = 1;
        break;
    default:
        return SIFT3D_FAILURE;
    }

    kernel = f->cl_apply_unrolled;
    im_set_kernel_arg(kernel, 0, src);
    im_set_kernel_arg(kernel, 1, dst);
    err = clSetKernelArg(kernel, 2, sizeof(cl_int), &dx);
    err |= clSetKernelArg(kernel, 3, sizeof(cl_int), &dy);
    err |= clSetKernelArg(kernel, 4, sizeof(cl_int), &dz);
    check_cl_error(err, "convolve_sep_cl: set kernel arg");
    err =
        clEnqueueNDRangeKernel(cl_data.queues[0], kernel, dim, NULL,
                               global_work_size, NULL, 0, NULL, NULL);
    return (int)err;
#else
    printf("convolve_sep_cl: This version was not compiled with OpenCL!\n");
    return SIFT3D_FAILURE;
#endif
}
/* Convolve_sep for symmetric filters. Currently just forwards to the
 * general implementation. */
static int convolve_sep_sym(const Image * const src, Image * const dst,
                            const Sep_FIR_filter * const f, const int dim,
                            const double unit)
{
    // TODO: Symmetry-specific function
    return convolve_sep_gen(src, dst, f, dim, unit);
}
/* Permute the dimensions of an image.
 *
 * Arguments:
 *   src - input image (initialized)
 *   dim1 - input permutation dimension (x = 0, y = 1, z = 2)
 *   dim2 - output permutation dimension (x = 0, y = 1, z = 2)
 *   dst - output image (initialized)
 *
 * example:
 *   im_permute(src, dst, 0, 1) -- permute x with y in src
 *     and save to dst
 */
int im_permute(const Image * const src, const int dim1, const int dim2,
               Image * const dst)
{
    register int x, y, z, c;

    // Verify inputs.
    // BUGFIX: valid dimensions are 0-2 (the units/dims arrays have
    // IM_NDIMS elements); the old check "> 3" admitted 3 and indexed one
    // past the end of those arrays.
    if (dim1 < 0 || dim2 < 0 || dim1 > 2 || dim2 > 2) {
        printf("im_permute: invalid dimensions: dim1 %d dim2 %d \n",
               dim1, dim2);
        return SIFT3D_FAILURE;
    }

    // Check for the trivial case
    if (dim1 == dim2) {
        return im_copy_data(src, dst);
    }

    // Permute the units
    memcpy(SIFT3D_IM_GET_UNITS(dst), SIFT3D_IM_GET_UNITS(src),
           IM_NDIMS * sizeof(double));
    SIFT3D_IM_GET_UNITS(dst)[dim1] = SIFT3D_IM_GET_UNITS(src)[dim2];
    SIFT3D_IM_GET_UNITS(dst)[dim2] = SIFT3D_IM_GET_UNITS(src)[dim1];

    // Resize the output
    memcpy(SIFT3D_IM_GET_DIMS(dst), SIFT3D_IM_GET_DIMS(src),
           IM_NDIMS * sizeof(int));
    SIFT3D_IM_GET_DIMS(dst)[dim1] = SIFT3D_IM_GET_DIMS(src)[dim2];
    SIFT3D_IM_GET_DIMS(dst)[dim2] = SIFT3D_IM_GET_DIMS(src)[dim1];
    dst->nc = src->nc;
    im_default_stride(dst);
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Transpose the data
    SIFT3D_IM_LOOP_START_C(dst, x, y, z, c)

        int src_coords[] = {x, y, z};
        int temp;

        // Permute the coordinates
        temp = src_coords[dim1];
        src_coords[dim1] = src_coords[dim2];
        src_coords[dim2] = temp;

        // Copy the datum
        SIFT3D_IM_GET_VOX(dst, x, y, z, c) = SIFT3D_IM_GET_VOX(src,
            src_coords[0], src_coords[1], src_coords[2], c);
    SIFT3D_IM_LOOP_END_C

    return SIFT3D_SUCCESS;
}
/* Change an image's stride, preserving its data.
 *
 * Parameters:
 *   -src: The source image. Must be initialized
 *   -strides: an array of length IM_NDIMS specifying the new strides
 *   -dst: The destination image. Must be initialized
 *
 * Return: SIFT3D_SUCCESS (0) on success, nonzero otherwise. */
int im_restride(const Image * const src, const size_t *const strides,
                Image * const dst)
{
    int x, y, z, c;

    // Copy the dimensions and adopt the requested strides
    memcpy(SIFT3D_IM_GET_DIMS(dst), SIFT3D_IM_GET_DIMS(src),
           IM_NDIMS * sizeof(int));
    memcpy(SIFT3D_IM_GET_STRIDES(dst), strides, IM_NDIMS * sizeof(size_t));
    dst->nc = src->nc;
    if (im_resize(dst))
        return SIFT3D_FAILURE;

    // Copy the data voxel-by-voxel (the layouts differ, so no flat copy)
    SIFT3D_IM_LOOP_START_C(dst, x, y, z, c)
        SIFT3D_IM_GET_VOX(dst, x, y, z, c) =
            SIFT3D_IM_GET_VOX(src, x, y, z, c);
    SIFT3D_IM_LOOP_END_C

    return SIFT3D_SUCCESS;
}
/* Initializes a tform to ensure memory safety.
 * Either this or the type-specific version must be called prior to using
 * a tform. Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int init_tform(void *const tform, const tform_type type)
{
    switch (type) {
    case AFFINE:
        // Delegate to the Affine initializer
        if (init_Affine((Affine *) tform, IM_NDIMS))
            return SIFT3D_FAILURE;
        return SIFT3D_SUCCESS;
    case TPS:
        puts("init_tform: TPS not yet implemented \n");
        return SIFT3D_FAILURE;
    default:
        puts("init_tform: unrecognized type \n");
        return SIFT3D_FAILURE;
    }
}
/* Initialize an Affine struct: sets the type and vtable, and allocates
 * the inner dim x (dim + 1) matrix, initializing it to zero.
 * dim must be at least 2. */
int init_Affine(Affine * const affine, const int dim)
{
    // Affine transforms make no sense below two dimensions
    if (dim < 2)
        return SIFT3D_FAILURE;

    // Tag the transform and hook up its dispatch table
    affine->tform.type = AFFINE;
    affine->tform.vtable = &Affine_vtable;

    // Allocate the zeroed homogeneous matrix
    return init_Mat_rm(&affine->A, dim, dim + 1, SIFT3D_DOUBLE,
                       SIFT3D_TRUE) ? SIFT3D_FAILURE : SIFT3D_SUCCESS;
}
/* Deep copy of a tform. Both src and dst must be initialized.
 * Dispatches through the source's vtable and returns its copy function's
 * result. */
int copy_tform(const void *const src, void *const dst)
{
    return TFORM_GET_VTABLE(src)->copy(src, dst);
}
/* Deep copy of one Affine to another. Both must be initialized. */
static int copy_Affine(const void *const src, void *const dst)
{
    const Affine *const srcAff = src;
    Affine *const dstAff = dst;

    // Copying the matrix copies the transform's entire state
    return Affine_set_mat(&srcAff->A, dstAff);
}
/* Deep copy of one TPS to another. Both must be initialized.
 * Not implemented: always logs an error and returns SIFT3D_FAILURE. */
static int copy_Tps(const void *const src, void *const dst)
{
    SIFT3D_ERR("copy_Tps has not yet been implemented!");
    return SIFT3D_FAILURE;
}
/* Set an Affine transform to the given matrix.
 * mat is copied. mat must be an n x (n + 1) matrix, where
 * n is the dimensionality of the transformation.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int Affine_set_mat(const Mat_rm * const mat, Affine * const affine)
{
    // Verify inputs: n x (n + 1) with n >= 2
    if (mat->num_cols != mat->num_rows + 1 || mat->num_rows < 2)
        return SIFT3D_FAILURE;

    // Copy, converting the element type to double
    return convert_Mat_rm(mat, &affine->A, SIFT3D_DOUBLE);
}
/* Apply an arbitrary transformation to an [x, y, z] triple.
 * Dispatches through the tform's vtable; results are written to the
 * three output pointers. */
void apply_tform_xyz(const void *const tform, const double x_in,
                     const double y_in, const double z_in, double *const x_out,
                     double *const y_out, double *const z_out)
{
    TFORM_GET_VTABLE(tform)->apply_xyz(tform, x_in, y_in, z_in,
                                       x_out, y_out, z_out);
}
/* Apply an Affine transformation to an [x, y, z] triple: multiplies the
 * 3 x 4 matrix A by the homogeneous point [x, y, z, 1]. Only supports
 * 3-dimensional transforms (asserted). */
static void apply_Affine_xyz(const void *const affine, const double x_in,
                             const double y_in, const double z_in,
                             double *const x_out, double *const y_out,
                             double *const z_out)
{
    const Affine *const aff = affine;
    const Mat_rm *const A = &aff->A;

    assert(AFFINE_GET_DIM(aff) == 3);

    // Each output is one row of A dotted with [x, y, z, 1];
    // the fourth column is the translation term
    *x_out = SIFT3D_MAT_RM_GET(A, 0, 0, double) * x_in +
        SIFT3D_MAT_RM_GET(A, 0, 1, double) * y_in +
        SIFT3D_MAT_RM_GET(A, 0, 2, double) * z_in +
        SIFT3D_MAT_RM_GET(A, 0, 3, double);
    *y_out = SIFT3D_MAT_RM_GET(A, 1, 0, double) * x_in +
        SIFT3D_MAT_RM_GET(A, 1, 1, double) * y_in +
        SIFT3D_MAT_RM_GET(A, 1, 2, double) * z_in +
        SIFT3D_MAT_RM_GET(A, 1, 3, double);
    *z_out = SIFT3D_MAT_RM_GET(A, 2, 0, double) * x_in +
        SIFT3D_MAT_RM_GET(A, 2, 1, double) * y_in +
        SIFT3D_MAT_RM_GET(A, 2, 2, double) * z_in +
        SIFT3D_MAT_RM_GET(A, 2, 3, double);
}
/* Apply a thin-plate spline transformation to an [x, y, z] triple.
 * Sums the radial basis U(r) = r^2 log(r^2) over all control points,
 * weighted by the spline parameters, then adds the affine part stored in
 * the trailing four parameter columns. 3D only (asserted). */
static void apply_Tps_xyz(const void *const tps, const double x_in,
                          const double y_in, const double z_in,
                          double *const x_out, double *const y_out,
                          double *const z_out)
{
    const Tps *const t = tps;
    const Mat_rm *const params = &t->params;
    const Mat_rm *const kp_src = &t->kp_src;

    assert(t->dim == 3);

    int n;
    int ctrl_pts = kp_src->num_rows; //number of control points
    double x_c, y_c, z_c, r_sq, U;
    double temp_x = 0.0, temp_y = 0.0, temp_z = 0.0;

    // Radial-basis contribution from each control point
    for (n = 0; n < ctrl_pts; n++) {
        x_c = SIFT3D_MAT_RM_GET(kp_src, n, 0, double);
        y_c = SIFT3D_MAT_RM_GET(kp_src, n, 1, double);
        z_c = SIFT3D_MAT_RM_GET(kp_src, n, 2, double);
        r_sq =
            (x_in - x_c) * (x_in - x_c) + (y_in - y_c) * (y_in - y_c) +
            (z_in - z_c) * (z_in - z_c);
        // U(0) is defined as 0 (limit of r^2 log r^2)
        if (r_sq == 0) {
            U = 0.0;
        } else {
            U = r_sq * log(r_sq);
        }
        temp_x += U * SIFT3D_MAT_RM_GET(params, 0, n, double);
        temp_y += U * SIFT3D_MAT_RM_GET(params, 1, n, double);
        temp_z += U * SIFT3D_MAT_RM_GET(params, 2, n, double);
    }

    // Affine part: columns ctrl_pts..ctrl_pts+3 hold the constant and
    // the x, y, z coefficients per output dimension
    temp_x += SIFT3D_MAT_RM_GET(params, 0, ctrl_pts, double);
    temp_x += SIFT3D_MAT_RM_GET(params, 0, ctrl_pts + 1, double) * x_in;
    temp_x += SIFT3D_MAT_RM_GET(params, 0, ctrl_pts + 2, double) * y_in;
    temp_x += SIFT3D_MAT_RM_GET(params, 0, ctrl_pts + 3, double) * z_in;
    temp_y += SIFT3D_MAT_RM_GET(params, 1, ctrl_pts, double);
    temp_y += SIFT3D_MAT_RM_GET(params, 1, ctrl_pts + 1, double) * x_in;
    temp_y += SIFT3D_MAT_RM_GET(params, 1, ctrl_pts + 2, double) * y_in;
    temp_y += SIFT3D_MAT_RM_GET(params, 1, ctrl_pts + 3, double) * z_in;
    temp_z += SIFT3D_MAT_RM_GET(params, 2, ctrl_pts, double);
    temp_z += SIFT3D_MAT_RM_GET(params, 2, ctrl_pts + 1, double) * x_in;
    temp_z += SIFT3D_MAT_RM_GET(params, 2, ctrl_pts + 2, double) * y_in;
    temp_z += SIFT3D_MAT_RM_GET(params, 2, ctrl_pts + 3, double) * z_in;

    //Save results
    x_out[0] = temp_x;
    y_out[0] = temp_y;
    z_out[0] = temp_z;
}
/* Apply an arbitrary transform to a matrix. See apply_Affine_Mat_rm for
 * matrix formats. Dispatches through the tform's vtable and returns its
 * result. */
int apply_tform_Mat_rm(const void *const tform, const Mat_rm * const mat_in,
                       Mat_rm * const mat_out)
{
    return TFORM_GET_VTABLE(tform)->apply_Mat_rm(tform, mat_in, mat_out);
}
/* Apply a spline transformation to a matrix,
 * by multiplication. See apply_Affine_Mat_rm for format of input matrices
 *
 * All matrices must be initialized with init_Mat_rm prior to use. For 3D! */
static int apply_Tps_Mat_rm(const void *const tps, const Mat_rm * const mat_in,
                            Mat_rm * const mat_out)
{
    const Tps *const t = tps;

    //Spline transformation matrix is dim * [number of chosen points+dim+1]
    //sp_src is [number of chosen points] * dim
    const Mat_rm *const params = &(t->params);
    const Mat_rm *const kp_src = &(t->kp_src);

    int num_pts = mat_in->num_cols; //number of points to be transformed
    int ctrl_pts = kp_src->num_rows; //number of control points
    int m, n, q;
    double temp, x, y, z, r_sq, x_c, y_c, z_c;

    // NOTE(review): variable-length array sized by the number of control
    // points — large control sets risk stack overflow.
    double U[ctrl_pts];

    //for each point
    for (q = 0; q < num_pts; q++) {

        //extract the coordinates
        x = SIFT3D_MAT_RM_GET(mat_in, 0, q, double);
        y = SIFT3D_MAT_RM_GET(mat_in, 1, q, double);
        z = SIFT3D_MAT_RM_GET(mat_in, 2, q, double);

        //Calculate U function for each control point
        for (n = 0; n < ctrl_pts; n++) {
            x_c = SIFT3D_MAT_RM_GET(kp_src, n, 0, double);
            y_c = SIFT3D_MAT_RM_GET(kp_src, n, 1, double);
            z_c = SIFT3D_MAT_RM_GET(kp_src, n, 2, double);
            r_sq =
                (x - x_c) * (x - x_c) + (y - y_c) * (y - y_c) + (z -
                                                                 z_c)
                * (z - z_c);
            // U(0) is defined as 0 (limit of r^2 log r^2)
            if (r_sq == 0) {
                U[n] = 0.0;
            } else {
                U[n] = r_sq * log(r_sq);
            }
        }

        //for each dimension
        for (m = 0; m < 3; m++) { //For 3D!
            temp = 0.0;
            // Radial-basis contribution
            for (n = 0; n < ctrl_pts; n++) {
                temp +=
                    U[n] * SIFT3D_MAT_RM_GET(params, m, n,
                                             double);
            }
            // Affine part: constant plus x, y, z coefficients
            temp += SIFT3D_MAT_RM_GET(params, m, ctrl_pts, double);
            temp +=
                SIFT3D_MAT_RM_GET(params, m, ctrl_pts + 1,
                                  double) * x;
            temp +=
                SIFT3D_MAT_RM_GET(params, m, ctrl_pts + 2,
                                  double) * y;
            temp +=
                SIFT3D_MAT_RM_GET(params, m, ctrl_pts + 3,
                                  double) * z;

            //Store results
            SIFT3D_MAT_RM_GET(mat_out, m, q, double) = temp;
        }
    }

    return SIFT3D_SUCCESS;
}
/* Get the type of a tform. */
tform_type tform_get_type(const void *const tform)
{
return ((Affine *) tform)->tform.type;
}
/* Get the size of a tform. */
/* Get the size, in bytes, of a tform's concrete struct, via its virtual
 * table. */
size_t tform_get_size(const void *const tform)
{
	return TFORM_GET_VTABLE(tform)->get_size();
}
/* Get the size of a type of tform. */
/* Get the size, in bytes, of the struct backing a given tform type.
 * Returns 0 (and prints an error) for unrecognized types. */
size_t tform_type_get_size(const tform_type type)
{
	switch (type) {
	case AFFINE:
		return Affine_vtable.get_size();
	case TPS:
		return Tps_vtable.get_size();
	default:
		break;
	}
	SIFT3D_ERR("tform_type_get_size: unrecognized " "type \n");
	return 0;
}
/* Returns the size of an Affine struct */
/* Returns the size of an Affine struct, in bytes. Backs the get_size slot of
 * Affine_vtable. */
static size_t Affine_get_size(void)
{
	return sizeof(Affine);
}
/* Returns the size of a Tps struct */
/* Returns the size of a Tps struct, in bytes. Backs the get_size slot of
 * Tps_vtable. */
static size_t Tps_get_size(void)
{
	return sizeof(Tps);
}
/* Write a tform to a file. */
/* Write a tform to the file at path, dispatching to the concrete type's
 * write implementation. Returns its result. */
int write_tform(const char *path, const void *const tform)
{
	return TFORM_GET_VTABLE(tform)->write(path, tform);
}
/* Write an affine transformation to a file. */
/* Write an affine transformation to a file. Only the matrix A is written,
 * via write_Mat_rm. */
static int write_Affine(const char *path, const void *const tform)
{
	return write_Mat_rm(path, &((const Affine *)tform)->A);
}
/* Write a thin-plate spline transformation to a file. */
/* Write a thin-plate spline transformation to a file.
 * NOTE(review): not yet implemented — always prints an error and returns
 * SIFT3D_FAILURE. */
static int write_Tps(const char *path, const void *const tform)
{
	// Suppress the unused-variable warning until this is implemented
	SIFT3D_IGNORE_UNUSED
	const Tps *const tps = tform;
	SIFT3D_ERR("write_Tps: this function has not yet been implemented.");
	return SIFT3D_FAILURE;
}
/* Free the memory associated with a tform */
/* Free the memory associated with a tform, dispatching to the concrete
 * type's cleanup implementation. Does not free the tform struct itself. */
void cleanup_tform(void *const tform)
{
	TFORM_GET_VTABLE(tform)->cleanup(tform);
}
/* Free the memory associated with an Affine transformation. */
static void cleanup_Affine(void *const affine)
{
Affine *const aff = affine;
cleanup_Mat_rm(&aff->A);
}
/* Free the memory assocaited with a thin-plate spline. */
/* Free the memory associated with a thin-plate spline: its parameter matrix
 * and its source keypoints. Does not free the struct itself. */
static void cleanup_Tps(void *const tps)
{
	Tps *const spline = tps;

	cleanup_Mat_rm(&spline->params);
	cleanup_Mat_rm(&spline->kp_src);
}
/* Apply an Affine transformation to a matrix, by multiplication. The format
* of Mat_in should be:
* [x1 x2 ... xN
* y1 y2 ... yN
* ...
* w1 w2 ... wN
* 1 1 ... 1]
*
* mat_out will be resized to the appropriate size. The format will be:
* [x1' x2' ... xN'
* y1' y2' ... yN'
* ...
* w1' w2' ... wN']
*
* All matrices must be initialized with init_Mat_rm prior to use. */
/* Apply an Affine transformation to a matrix of points by left-multiplying
 * with the affine matrix A. See the format comment above. mat_out is resized
 * by mul_Mat_rm. */
static int apply_Affine_Mat_rm(const void *const affine,
	const Mat_rm * const mat_in, Mat_rm * const mat_out)
{
	const Affine *const xform = affine;

	return mul_Mat_rm(&xform->A, mat_in, mat_out);
}
/* Computes mat_in1 * mat_in2 = mat_out. mat_out will be resized
* the appropriate size.
*
* All matrices must be initialized with init_Mat_rm prior to use. */
/* Computes mat_out = mat_in1 * mat_in2. mat_out is resized to
 * [rows(mat_in1) x cols(mat_in2)] with the operands' element type.
 *
 * Fails if the inner dimensions do not conform or the element types differ.
 * All matrices must be initialized with init_Mat_rm prior to use. */
int mul_Mat_rm(const Mat_rm * const mat_in1, const Mat_rm * const mat_in2,
	       Mat_rm * const mat_out)
{
	int row, col, inner;

	// The product requires conforming dimensions and matching types
	if (mat_in1->num_cols != mat_in2->num_rows ||
	    mat_in1->type != mat_in2->type)
		return SIFT3D_FAILURE;

	// Shape the output
	mat_out->type = mat_in1->type;
	mat_out->num_rows = mat_in1->num_rows;
	mat_out->num_cols = mat_in2->num_cols;
	if (resize_Mat_rm(mat_out))
		return SIFT3D_FAILURE;

	// Inner-product accumulation for one element type
#define MUL_MAT_RM(type) \
	SIFT3D_MAT_RM_LOOP_START(mat_out, row, col) \
		type acc = 0; \
		for (inner = 0; inner < mat_in1->num_cols; inner++) { \
			acc += SIFT3D_MAT_RM_GET(mat_in1, row, inner, type) * \
			       SIFT3D_MAT_RM_GET(mat_in2, inner, col, type); \
		} \
		SIFT3D_MAT_RM_GET(mat_out, row, col, type) = acc; \
	SIFT3D_MAT_RM_LOOP_END

	// Row-major multiply, specialized on the element type
	switch (mat_out->type) {
	case SIFT3D_DOUBLE:
		MUL_MAT_RM(double) break;
	case SIFT3D_FLOAT:
		MUL_MAT_RM(float) break;
	case SIFT3D_INT:
		MUL_MAT_RM(int) break;
	default:
		puts("mul_Mat_rm: unknown type \n");
		return SIFT3D_FAILURE;
	}
#undef MUL_MAT_RM

	return SIFT3D_SUCCESS;
}
/* Computes the eigendecomposition of a real symmetric matrix,
* A = Q * diag(L) * Q', where Q is a real orthogonal matrix and L is a real
* diagonal matrix.
*
* A must be an [nxn] matrix. Q is [nxm], where m is in the interval [1, n],
* depending on the values of A. L is [nx1], where the first m elements are
* sorted in ascending order. The remaining n - m elements are zero.
*
* If Q is NULL, the eigenvectors will not be computed.
*
* The eigendecomposition is computed by divide and conquer.
*
* This function resizes all non-null outputs and sets their type to double.
*
* This function does not ensure that A is symmetric.
*
 * All matrices must be initialized prior to calling this function.
* All matrices must have type double.
*
* Note: This function computes all of the eigenvalues, to a high degree of
* accuracy. A faster implementation is possible if you do not need high
* precision, or if you do not need all of the eigenvalues, or if you do not
* need eigenvalues outside of some interval.
*/
int eigen_Mat_rm(Mat_rm * A, Mat_rm * Q, Mat_rm * L)
{
	Mat_rm A_trans;		// Working copy of A, overwritten by LAPACK
	double *work;		// dsyevd double workspace
	fortran_int *iwork;	// dsyevd integer workspace
	double lwork_ret;	// Receives the optimal lwork from the query
	fortran_int info, lwork, liwork;
	const char jobz = Q == NULL ? 'N' : 'V';	// 'V': also compute eigenvectors
	const char uplo = 'U';	// Read the upper triangle of A
	const fortran_int n = A->num_cols;
	const fortran_int lda = n;
	const fortran_int lwork_query = -1;	// -1 requests a workspace size query
	const fortran_int liwork_query = -1;
	// Verify inputs: A must be square and of type double
	if (A->num_rows != n) {
		puts("eigen_Mat_rm: A be square \n");
		return SIFT3D_FAILURE;
	}
	if (A->type != SIFT3D_DOUBLE) {
		puts("eigen_Mat_rm: A must have type double \n");
		return SIFT3D_FAILURE;
	}
	// Resize the eigenvalue output to [n x 1]
	L->num_rows = n;
	L->num_cols = 1;
	L->type = SIFT3D_DOUBLE;
	if (resize_Mat_rm(L))
		return SIFT3D_FAILURE;
	// Initialize intermediate matrices and buffers. work/iwork start NULL
	// so the error path can free them unconditionally.
	work = NULL;
	iwork = NULL;
	if (init_Mat_rm(&A_trans, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE))
		goto EIGEN_MAT_RM_QUIT;
	// Copy the input matrix (A = A'). LAPACK reads the row-major data as
	// column-major, i.e. as the transpose; since A is assumed symmetric,
	// A' = A and no explicit transpose is needed.
	if (copy_Mat_rm(A, &A_trans))
		goto EIGEN_MAT_RM_QUIT;
	// Query for the workspace sizes: lwork_ret receives the optimal
	// lwork, and liwork receives the optimal liwork (passed in the iwork
	// slot, per the dsyevd query convention).
	dsyevd_(&jobz, &uplo, &n, A_trans.u.data_double, &lda, L->u.data_double,
		&lwork_ret, &lwork_query, &liwork, &liwork_query, &info);
	if ((int32_t) info) {
		printf
		    ("eigen_Mat_rm: LAPACK dsyevd workspace query error code %d",
		     info);
		goto EIGEN_MAT_RM_QUIT;
	}
	// Allocate work spaces of the sizes reported by the query
	lwork = (fortran_int) lwork_ret;
	if ((work = (double *)malloc(lwork * sizeof(double))) == NULL ||
	    (iwork =
	     (fortran_int *) malloc(liwork * sizeof(fortran_int))) == NULL)
		goto EIGEN_MAT_RM_QUIT;
	// Compute the eigendecomposition. Eigenvalues go to L in ascending
	// order; if jobz == 'V', A_trans is overwritten with the eigenvectors.
	dsyevd_(&jobz, &uplo, &n, A_trans.u.data_double, &lda, L->u.data_double,
		work, &lwork, iwork, &liwork, &info);
	if ((int32_t) info) {
		printf("eigen_Mat_rm: LAPACK dsyevd error code %d", (int) info);
		goto EIGEN_MAT_RM_QUIT;
	}
	// Optionally return the eigenvectors, transposed back to row-major
	if (Q != NULL && transpose_Mat_rm(&A_trans, Q))
		goto EIGEN_MAT_RM_QUIT;
	free(work);
	free(iwork);
	cleanup_Mat_rm(&A_trans);
	return SIFT3D_SUCCESS;
EIGEN_MAT_RM_QUIT:
	// Unified error path: release whatever was allocated
	if (work != NULL)
		free(work);
	if (iwork != NULL)
		free(iwork);
	cleanup_Mat_rm(&A_trans);
	return SIFT3D_FAILURE;
}
/* Solves the system AX=B exactly. A must be a square matrix.
* This function first computes the reciprocal condition number of A.
* If it is below the parameter "limit", it returns SIFT3D_SINGULAR. If limit
* is less than 0, a default value of 100 * eps is used.
*
* The system is solved by LU decomposition.
*
* This function returns an error if A and B do not have valid dimensions.
 * This function resizes X to [n x nrhs] and changes the type to match B.
* All matrices must be initialized prior to calling this function.
* All matrices must have type double.
*/
/* Solves the system AX=B exactly by LU decomposition. See the comment above
 * for the contract: returns SIFT3D_SINGULAR when the reciprocal condition
 * number of A falls below limit (or below 100 * eps if limit < 0). */
int solve_Mat_rm(const Mat_rm *const A, const Mat_rm *const B,
		 const double limit, Mat_rm *const X)
{
	Mat_rm A_trans, B_trans;	// Column-major copies for LAPACK
	double *work;
	fortran_int *ipiv, *iwork;
	double limit_arg, anorm, rcond;
	fortran_int info;
	const fortran_int m = A->num_rows;
	const fortran_int n = A->num_cols;
	const fortran_int nrhs = B->num_cols;
	const fortran_int lda = m;
	const fortran_int ldb = B->num_rows;
	const char norm_type = '1';	// 1-norm for the condition estimate
	const char trans = 'N';	// Solve A * X = B, not A' * X = B

	// Resolve the condition number limit. (Bug fix: the previous version
	// only assigned limit_arg when limit < 0, leaving it uninitialized —
	// and later read — for non-negative limits.)
	limit_arg = limit < 0 ? 100.0 * DBL_EPSILON : limit;

	// Verify inputs: A square, B conforming
	if (m != n || ldb != m) {
		puts("solve_Mat_rm: invalid dimensions! \n");
		return SIFT3D_FAILURE;
	}
	if (A->type != SIFT3D_DOUBLE || B->type != SIFT3D_DOUBLE) {
		puts("solve_mat_rm: All matrices must have type double \n");
		return SIFT3D_FAILURE;
	}
	// Initialize intermediate matrices and buffers. Pointers start NULL
	// so the error path can free unconditionally.
	ipiv = NULL;
	work = NULL;
	iwork = NULL;
	if (init_Mat_rm(&A_trans, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE) ||
	    init_Mat_rm(&B_trans, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE) ||
	    (work = (double *)malloc(n * 4 * sizeof(double))) == NULL ||
	    (iwork = (fortran_int *) malloc(n * sizeof(fortran_int))) == NULL ||
	    (ipiv = (fortran_int *) calloc(m, sizeof(fortran_int))) == NULL)
		goto SOLVE_MAT_RM_QUIT;
	// Transpose matrices to column-major for LAPACK
	if (transpose_Mat_rm(A, &A_trans) || transpose_Mat_rm(B, &B_trans))
		goto SOLVE_MAT_RM_QUIT;
	// Compute the L1-norm of A, needed by the condition estimator
	anorm = dlange_(&norm_type, &m, &n, A_trans.u.data_double, &lda, work);
	// Compute the LU decomposition of A in place
	dgetrf_(&m, &n, A_trans.u.data_double, &lda, ipiv, &info);
	if ((int32_t) info < 0) {
		printf("solve_Mat_rm: LAPACK dgetrf error code %d \n",
		       (int) info);
		goto SOLVE_MAT_RM_QUIT;
	} else if ((int32_t) info > 0) {
		// info > 0 means U has an exactly-zero pivot
		goto SOLVE_MAT_RM_SINGULAR;
	}
	// Estimate the reciprocal condition number of A
	dgecon_(&norm_type, &n, A_trans.u.data_double, &lda, &anorm, &rcond,
		work, iwork, &info);
	if ((int32_t) info) {
		printf("solve_Mat_rm: LAPACK dgecon error code %d \n",
		       (int) info);
		goto SOLVE_MAT_RM_QUIT;
	}
	// Report near-singularity rather than returning a garbage solution
	if (rcond < limit_arg)
		goto SOLVE_MAT_RM_SINGULAR;
	// Solve the system using the LU factors; the solution overwrites
	// B_trans
	dgetrs_(&trans, &n, &nrhs, A_trans.u.data_double, &lda, ipiv,
		B_trans.u.data_double, &ldb, &info);
	if ((int32_t) info) {
		printf("solve_Mat_rm: LAPACK dgetrs error code %d \n",
		       (int) info);
		goto SOLVE_MAT_RM_QUIT;
	}
	// Transpose the solution back to row-major, into X
	if (transpose_Mat_rm(&B_trans, X))
		goto SOLVE_MAT_RM_QUIT;

	free(ipiv);
	free(work);
	free(iwork);
	cleanup_Mat_rm(&A_trans);
	cleanup_Mat_rm(&B_trans);
	return SIFT3D_SUCCESS;

SOLVE_MAT_RM_SINGULAR:
	// free(NULL) is a no-op, so no guards are needed
	free(ipiv);
	free(work);
	free(iwork);
	cleanup_Mat_rm(&A_trans);
	cleanup_Mat_rm(&B_trans);
	return SIFT3D_SINGULAR;

SOLVE_MAT_RM_QUIT:
	free(ipiv);
	free(work);
	free(iwork);
	cleanup_Mat_rm(&A_trans);
	cleanup_Mat_rm(&B_trans);
	return SIFT3D_FAILURE;
}
/* Solves the system AX=B by least-squares.
*
* A least-norm solution is computed using the singular
* value decomposition. A need not be full-rank.
*
* This function returns an error if A and B do not have valid dimensions.
 * This function resizes X to [n x nrhs] and changes the type to match B.
 * All matrices must be initialized prior to calling this function.
* All matrices must have type double.
*/
/* Solves the system AX=B by least-squares (minimum-norm SVD solution via
 * dgelss). See the comment above for the contract. */
int solve_Mat_rm_ls(const Mat_rm *const A, const Mat_rm *const B,
		    Mat_rm *const X)
{
	Mat_rm A_trans, B_trans;	// Column-major copies for LAPACK
	double *s, *work;	// Singular values; dgelss workspace
	double lwork_ret;	// Receives the optimal lwork from the query
	fortran_int info, rank, lwork;
	int i, j;
	const double rcond = -1;	// -1: use machine precision as the rank cutoff
	const fortran_int m = A->num_rows;
	const fortran_int n = A->num_cols;
	const fortran_int nrhs = B->num_cols;
	const fortran_int lda = m;
	const fortran_int ldb = B->num_rows;
	const fortran_int lwork_query = -1;	// -1 requests a workspace query

	// Verify inputs: B must have as many rows as A
	if (m != ldb) {
		puts("solve_Mat_rm_ls: invalid dimensions \n");
		return SIFT3D_FAILURE;
	}
	if (A->type != SIFT3D_DOUBLE || B->type != SIFT3D_DOUBLE) {
		puts("solve_mat_rm_ls: All matrices must have type double \n");
		return SIFT3D_FAILURE;
	}
	// Resize the output to [n x nrhs]
	X->type = SIFT3D_DOUBLE;
	X->num_rows = A->num_cols;
	X->num_cols = B->num_cols;
	if (resize_Mat_rm(X))
		return SIFT3D_FAILURE;
	// Initialize intermediate matrices and buffers
	s = NULL;
	work = NULL;
	if (init_Mat_rm(&A_trans, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE) ||
	    init_Mat_rm(&B_trans, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE) ||
	    (s = (double *)calloc(SIFT3D_MAX(m, n), sizeof(double))) == NULL)
		goto SOLVE_MAT_RM_LS_QUIT;
	// Transpose matrices to column-major for LAPACK
	if (transpose_Mat_rm(A, &A_trans) || transpose_Mat_rm(B, &B_trans))
		goto SOLVE_MAT_RM_LS_QUIT;
	// Query for the optimal workspace size
	dgelss_(&m, &n, &nrhs, A_trans.u.data_double, &lda,
		B_trans.u.data_double, &ldb, s, &rcond, &rank, &lwork_ret,
		&lwork_query, &info);
	if ((int32_t) info) {
		printf
		    ("solve_mat_rm: LAPACK dgelss work query error code %d \n",
		     (int) info);
		// Bug fix: the previous version fell through here and used an
		// indeterminate lwork_ret after a failed query
		goto SOLVE_MAT_RM_LS_QUIT;
	}
	lwork = (fortran_int) lwork_ret;
	// Allocate the workspace
	if ((work = (double *)malloc(lwork * sizeof(double))) == NULL)
		goto SOLVE_MAT_RM_LS_QUIT;
	// Solve the system; the solution overwrites the top of B_trans
	dgelss_(&m, &n, &nrhs, A_trans.u.data_double, &lda,
		B_trans.u.data_double, &ldb, s, &rcond, &rank, work, &lwork,
		&info);
	if ((int32_t) info) {
		printf("solve_mat_rm: LAPACK dgelss error code %d \n",
		       (int) info);
		goto SOLVE_MAT_RM_LS_QUIT;
	}
	// Transpose the solution back to row-major. X was resized above, so
	// copy element-wise rather than resizing again from B_trans's shape.
	SIFT3D_MAT_RM_LOOP_START(X, i, j)
	    SIFT3D_MAT_RM_GET(X, i, j, double) =
	    SIFT3D_MAT_RM_GET(&B_trans, j, i, double);
	SIFT3D_MAT_RM_LOOP_END

	free(s);
	free(work);
	cleanup_Mat_rm(&A_trans);
	cleanup_Mat_rm(&B_trans);
	return SIFT3D_SUCCESS;

SOLVE_MAT_RM_LS_QUIT:
	// free(NULL) is a no-op, so no guards are needed
	free(s);
	free(work);
	cleanup_Mat_rm(&A_trans);
	cleanup_Mat_rm(&B_trans);
	return SIFT3D_FAILURE;
}
/* Computes the trace of a matrix. trace is assumed to be the same type as
* mat. Returns an error if mat is not square.
*
* All matrices must be initialized with init_Mat_rm prior to calling
* this function. */
/* Computes the trace of a square matrix. The result is written through
 * trace, which is assumed to point to an object of the same element type as
 * mat. Fails if mat is empty or not square.
 *
 * All matrices must be initialized with init_Mat_rm prior to calling
 * this function. */
int trace_Mat_rm(Mat_rm * mat, void *trace)
{
	int i;

	// The trace is only defined for non-empty square matrices
	if (mat->num_rows != mat->num_cols || mat->num_rows < 1) {
		return SIFT3D_FAILURE;
	}
	// Sum the main diagonal for one element type
#define SUM_DIAGONAL(type) \
	{\
		type total = 0; \
		for (i = 0; i < mat->num_rows; i++) { \
			total += SIFT3D_MAT_RM_GET(mat, i, i, type); \
		} \
		*((type *) trace) = total; \
	}
	// Specialize on the element type
	switch (mat->type) {
	case SIFT3D_DOUBLE:
		SUM_DIAGONAL(double) break;
	case SIFT3D_FLOAT:
		SUM_DIAGONAL(float) break;
	case SIFT3D_INT:
		SUM_DIAGONAL(int) break;
	default:
		puts("trace_Mat_rm: unknown type \n");
		return SIFT3D_FAILURE;
	}
#undef SUM_DIAGONAL
	return SIFT3D_SUCCESS;
}
/* Tranposes a matrix. Resizes dst with the type of src.
* All matrices must be initialized prior to calling this function. */
/* Transposes src into dst. dst is resized to [cols(src) x rows(src)] with
 * the element type of src. Fails on empty matrices.
 * All matrices must be initialized prior to calling this function. */
int transpose_Mat_rm(const Mat_rm *const src, Mat_rm *const dst)
{
	int i, j;

	// Reject empty matrices
	if (src->num_rows < 1 || src->num_cols < 1)
		return SIFT3D_FAILURE;

	// Give dst the transposed dimensions and the type of src
	dst->type = src->type;
	dst->num_rows = src->num_cols;
	dst->num_cols = src->num_rows;
	if (resize_Mat_rm(dst))
		return SIFT3D_FAILURE;

	// Element-wise copy with swapped indices, for one element type
#define COPY_TRANSPOSED(type) \
	SIFT3D_MAT_RM_LOOP_START(src, i, j) \
		SIFT3D_MAT_RM_GET(dst, j, i, type) = \
			SIFT3D_MAT_RM_GET(src, i, j, type); \
	SIFT3D_MAT_RM_LOOP_END

	// Specialize on the element type
	switch (src->type) {
	case SIFT3D_DOUBLE:
		COPY_TRANSPOSED(double);
		break;
	case SIFT3D_FLOAT:
		COPY_TRANSPOSED(float);
		break;
	case SIFT3D_INT:
		COPY_TRANSPOSED(int);
		break;
	default:
#ifndef NDEBUG
		puts("transpose_Mat_rm: unknown type \n");
#endif
		return SIFT3D_FAILURE;
	}
#undef COPY_TRANSPOSED
	return SIFT3D_SUCCESS;
}
/* Computes the determinant of a symmetric matrix. det is assumed to be the
* same type as mat. Returns an error if mat is not square.
*
* This function does not verify that mat is symmetric.
*
* All matrices must be initialized with init_Mat_rm prior to calling
* this function. */
/* Computes the determinant of a symmetric matrix as the product of its
 * eigenvalues. det is assumed to point to an object of the same element type
 * as mat. Returns an error if mat is not square.
 *
 * This function does not verify that mat is symmetric.
 *
 * All matrices must be initialized with init_Mat_rm prior to calling
 * this function. */
int det_symm_Mat_rm(Mat_rm * mat, void *det)
{
	Mat_rm matd, L;		// Double copy of mat; eigenvalues
	double detd;
	int i, j;
	const int n = mat->num_cols;

	// Verify inputs: mat must be square and non-empty
	if (n < 1 || mat->num_rows != n) {
		puts("det_symm_Mat_rm: invalid dimensions \n");
		return SIFT3D_FAILURE;
	}
	// Initialize intermediates
	if (init_Mat_rm(&matd, 0, 0, mat->type, SIFT3D_FALSE) ||
	    init_Mat_rm(&L, n, 1, SIFT3D_DOUBLE, SIFT3D_FALSE))
		goto DET_SYMM_QUIT;

	// Convert the matrix to type double, as required by eigen_Mat_rm
	if (convert_Mat_rm(mat, &matd, SIFT3D_DOUBLE))
		goto DET_SYMM_QUIT;

	// Get the eigendecomposition with LAPACK (eigenvalues only)
	if (eigen_Mat_rm(&matd, NULL, &L))
		goto DET_SYMM_QUIT;

	// The determinant is the PRODUCT of the eigenvalues. (Bug fix: the
	// previous version summed them, which computes the trace, not the
	// determinant.)
	detd = 1.0;
	SIFT3D_MAT_RM_LOOP_START(&L, i, j)
	    detd *= SIFT3D_MAT_RM_GET(&L, i, j, double);
	SIFT3D_MAT_RM_LOOP_END

	// Convert the output to the correct type
	switch (mat->type) {
	case SIFT3D_DOUBLE:
		*((double *)det) = detd;
		break;
	case SIFT3D_FLOAT:
		*((float *)det) = (float)detd;
		break;
	case SIFT3D_INT:
		*((int *)det) = (int)detd;
		break;
	default:
		puts("det_symm_Mat_rm: unknown type \n");
		goto DET_SYMM_QUIT;
	}
	cleanup_Mat_rm(&matd);
	cleanup_Mat_rm(&L);
	return SIFT3D_SUCCESS;

DET_SYMM_QUIT:
	cleanup_Mat_rm(&matd);
	cleanup_Mat_rm(&L);
	return SIFT3D_FAILURE;
}
/* Apply a separable filter in multiple dimensions. This function resamples the
* input to have the same units as f, then resamples the output to the
* original units.
*
* Parameters:
* -src: The input image.
* -dst: The filtered image.
* -f: The filter to apply.
* -unit: The physical units of the filter kernel. Use -1.0 for the default,
* which is the same units as src.
*
* Return: SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int apply_Sep_FIR_filter(const Image * const src, Image * const dst,
			 Sep_FIR_filter * const f, const double unit)
{
	Image temp;		// Scratch buffer for ping-pong filtering
	Image *cur_src, *cur_dst;	// Ping-pong roles: read from cur_src, write cur_dst
	int i;
	const double unit_default = -1.0;	// Sentinel: use src's own units
	// Verify inputs: unit must be non-negative or exactly the sentinel
	if (unit < 0 && unit != unit_default) {
		SIFT3D_ERR("apply_Sep_FIR_filter: invalid unit: %f, use "
			   "%f for default \n", unit, unit_default);
		return SIFT3D_FAILURE;
	}
	// Resize the output
	if (im_copy_dims(src, dst))
		return SIFT3D_FAILURE;
	// Allocate temporary storage (initialized to a copy of src)
	init_im(&temp);
	if (im_copy_data(src, &temp))
		goto apply_sep_f_quit;
	// Exchange the roles of temp and dst. Note that after the first swap,
	// cur_src no longer aliases the caller's (const) src.
#define SWAP_BUFFERS \
    if (cur_dst == &temp) { \
	cur_src = &temp; \
	cur_dst = dst; \
    } else { \
	cur_src = dst; \
	cur_dst = &temp; \
    }
	// Apply the 1D kernel along each of the n dimensions in turn
	cur_src = (Image *) src;	// Cast away const; cur_src is only read
	cur_dst = &temp;
	for (i = 0; i < IM_NDIMS; i++) {
		// Check for default parameters
		const double unit_arg = unit == unit_default ?
		    SIFT3D_IM_GET_UNITS(src)[i] : unit;
#ifdef SIFT3D_USE_OPENCL
		// The OpenCL kernel filters along any dimension directly
		convolve_sep(cur_src, cur_dst, f, i, unit_arg);
		SWAP_BUFFERS
#else
		// The CPU path only filters along x, so permute dimension i
		// into x first...
		if (i != 0) {
			if (im_permute(cur_src, 0, i, cur_dst))
				goto apply_sep_f_quit;
			SWAP_BUFFERS
		}
		// ...apply the filter along x...
		convolve_sep(cur_src, cur_dst, f, 0, unit_arg);
		SWAP_BUFFERS
		// ...and permute back
		if (i != 0) {
			if (im_permute(cur_src, 0, i, cur_dst))
				goto apply_sep_f_quit;
			SWAP_BUFFERS
		}
#endif
	}
	// Swap back so cur_dst names the buffer holding the final result
	SWAP_BUFFERS;
#undef SWAP_BUFFERS
	// Copy result to dst, if the last write landed in temp
	if (cur_dst != dst && im_copy_data(cur_dst, dst))
		goto apply_sep_f_quit;
	// Clean up
	im_free(&temp);
	return SIFT3D_SUCCESS;
apply_sep_f_quit:
	im_free(&temp);
	return SIFT3D_FAILURE;
}
/* Initialize a separable FIR filter struct with the given parameters. If OpenCL
* support is enabled and initialized, this creates a program to apply it with
* separable filters.
*
* Note that the kernel data will be copied, so the user can free it without
* affecting f. */
/* Initialize a separable FIR filter struct with the given parameters. The
 * kernel data is copied, so the caller may free it afterwards.
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int init_Sep_FIR_filter(Sep_FIR_filter *const f, const int dim, const int width,
			const float *const kernel, const int symmetric)
{
	const size_t kernel_size = width * sizeof(float);
	// Save the parameters
	f->dim = dim;
	f->width = width;
	f->symmetric = symmetric;
	// Allocate the kernel memory
	if ((f->kernel = (float *) malloc(kernel_size)) == NULL) {
		SIFT3D_ERR("init_Sep_FIT_filter: out of memory! \n");
		return SIFT3D_FAILURE;
	}
	// Copy the kernel data
	memcpy(f->kernel, kernel, kernel_size);
#ifdef SIFT3D_USE_OPENCL
	// Build an OpenCL kernel with the filter taps unrolled into the
	// source text.
	// NOTE(review): several apparent defects in this (conditionally
	// compiled) section — verify before enabling OpenCL builds:
	// 1. Each sprintf writes at the START of src, overwriting the
	//    previous contents; appending appears to be intended.
	// 2. f->kernel[i] is indexed with negative i (out of bounds);
	//    presumably f->kernel[i + half_width] was intended.
	// 3. f->half_width is read but never assigned in this function —
	//    confirm it is set elsewhere before this runs.
	// 4. The loop bound i < half_width skips the last tap.
	{
		char src[1 << 15];
		char *template;
		cl_program program;
		cl_int err;
		float k;
		int i;
		const char *path = SEP_FIR_3D_PATH;
		const int half_width = f->half_width;
		// Load the template
		if ((template = read_file(path)) == NULL) {
			printf("init_Sep_FIR_Filter: error reading path %s \n",
			       path);
			return SIFT3D_FAILURE;
		}
		sprintf(src, "%s\n", template);
		// Write the unrolled kernel
		for (i = -half_width; i < half_width; i++) {
			k = f->kernel[i];
			sprintf(src, "acc += %.16f * "
				"read_imagef(src, sampler, center + d_xyz * %d); \n",
				k, i);
		}
		// Write the ending
		sprintf(src,
			"write_imagef(dst, sampler, (float4) center); \n } \n");
		// Compile the program
		if (compile_cl_program_from_source(&program, cl_data.context,
						   cl_data.devices,
						   cl_data.num_devices,
						   (char **)&src, 1))
			return SIFT3D_FAILURE;
		f->cl_apply_unrolled =
		    clCreateKernel(program, "sep_fir_3d", &err);
		check_cl_error(err, "init_Sep_FIR_Filter: create kernel");
		clReleaseProgram(program);
	}
#endif
	return SIFT3D_SUCCESS;
}
/* Free a Sep_FIR_Filter. */
/* Free the kernel memory of a Sep_FIR_filter and mark it as freed, so a
 * repeated cleanup is a harmless no-op. Does not free the struct itself. */
void cleanup_Sep_FIR_filter(Sep_FIR_filter *const f)
{
	// free(NULL) is a no-op, so no guard is needed
	free(f->kernel);
	f->kernel = NULL;
#ifdef SIFT3D_USE_OPENCL
	//TODO release OpenCL program
#endif
}
/* Initialize the values of im so that it can be used by the
* resize function. Does not allocate memory. */
/* Initialize the fields of im so that it can be passed to the resize
 * function. Does not allocate memory. */
void init_im(Image *const im)
{
	// No pixel data yet; im_resize allocates it later
	im->data = NULL;
	im->size = 0;
	im->cl_valid = SIFT3D_FALSE;
	// Default to unit physical spacing and an undefined scale
	im->ux = 1;
	im->uy = 1;
	im->uz = 1;
	im->s = -1.0;
	// Zero the dimensions and strides
	memset(SIFT3D_IM_GET_DIMS(im), 0, IM_NDIMS * sizeof(int));
	memset(SIFT3D_IM_GET_STRIDES(im), 0, IM_NDIMS * sizeof(size_t));
}
/* Initialize a normalized Gaussian filter, of the given sigma.
* If SIFT3D_GAUSS_WIDTH_FCTR is defined, use that value for
* the ratio between the width of the filter and sigma. Otherwise,
* use the default value 3.0
*/
#ifndef SIFT3D_GAUSS_WIDTH_FCTR
#define SIFT3D_GAUSS_WIDTH_FCTR 3.0
#endif
/* Initialize a normalized 1D Gaussian filter of the given sigma. The kernel
 * half-width is ceil(sigma * SIFT3D_GAUSS_WIDTH_FCTR), with a minimum of 1.
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int init_Gauss_filter(Gauss_filter * const gauss, const double sigma,
		      const int dim)
{
	float *coefs;
	double arg;
	float total;
	int j;
	const int half_width = sigma > 0 ?
	    SIFT3D_MAX((int)ceil(sigma * SIFT3D_GAUSS_WIDTH_FCTR), 1) :
	    1;
	const int width = 2 * half_width + 1;

	// Allocate the kernel coefficients
	if ((coefs = (float *) malloc(width * sizeof(float))) == NULL)
		return SIFT3D_FAILURE;

	// Evaluate the unnormalized Gaussian at each tap, accumulating the sum
	total = 0;
	for (j = 0; j < width; j++) {
		// Signed distance from the center tap
		arg = (double)j - half_width;
		// Scale by sigma; DBL_EPSILON avoids division by zero when
		// sigma == 0
		arg /= sigma + DBL_EPSILON;
		// exp(-x^2 / (2 sigma^2))
		coefs[j] = (float)exp(-0.5 * arg * arg);
		total += coefs[j];
	}

	// Normalize the taps to sum to 1
	for (j = 0; j < width; j++) {
		coefs[j] /= total;
	}

	// Save the filter data (the kernel is copied by init_Sep_FIR_filter)
	gauss->sigma = sigma;
	if (init_Sep_FIR_filter(&gauss->f, dim, width, coefs, SIFT3D_TRUE))
		goto init_Gauss_filter_quit;

	free(coefs);
	return SIFT3D_SUCCESS;

init_Gauss_filter_quit:
	free(coefs);
	return SIFT3D_FAILURE;
}
/* Initialize a Gaussian filter to go from scale s_cur to s_next. */
/* Initialize a Gaussian filter that blurs an image from scale s_cur up to
 * scale s_next. Fails if s_cur > s_next. */
int init_Gauss_incremental_filter(Gauss_filter * const gauss,
				  const double s_cur, const double s_next,
				  const int dim)
{
	double sigma;

	// The scale parameter may only increase
	if (s_cur > s_next) {
		SIFT3D_ERR("init_Gauss_incremental_filter: "
			   "s_cur (%f) > s_next (%f) \n", s_cur, s_next);
		return SIFT3D_FAILURE;
	}
	assert(dim > 0);

	// Gaussian variances add under convolution, so blurring by this sigma
	// takes scale s_cur to s_next
	sigma = sqrt(s_next * s_next - s_cur * s_cur);

	// Initialize the filter kernel
	return init_Gauss_filter(gauss, sigma, dim) ?
	    SIFT3D_FAILURE : SIFT3D_SUCCESS;
}
/* Free a Gauss_filter */
/* Free the memory associated with a Gauss_filter (its separable FIR kernel).
 * Does not free the struct itself. */
void cleanup_Gauss_filter(Gauss_filter * gauss)
{
	cleanup_Sep_FIR_filter(&gauss->f);
}
/* Initialize a GSS filters stuct. This must be called before gss can be
* used in any other functions. */
/* Initialize a GSS_filters struct. Must be called before gss can be used in
 * any other functions. Does not allocate memory. */
void init_GSS_filters(GSS_filters * const gss)
{
	// No filters allocated yet; a negative count marks the struct empty
	gss->gauss_octave = NULL;
	gss->num_filters = -1;
}
/* Create GSS filters to create the given scale-space
* pyramid. */
/* Create GSS filters to create the given scale-space pyramid: one filter for
 * the initial blur from the nominal scale, plus one incremental filter per
 * level transition within an octave (filters are reused across octaves).
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise.
 * NOTE(review): on a mid-loop failure, filters built so far are left
 * allocated; callers are presumed to call cleanup_GSS_filters — confirm. */
int make_gss(GSS_filters * const gss, const Pyramid * const pyr)
{
	Image *cur, *next;
	int o, s;
	const int dim = 3;	// 3D filters
	const int num_filters = pyr->num_levels - 1;	// One per level transition
	const int first_level = pyr->first_level;
	const int last_level = SIFT3D_PYR_LAST_LEVEL(pyr);
	// Verify inputs: need at least one transition
	if (num_filters < 1) {
		SIFT3D_ERR("make_gss: pyr has only %d levels, must have "
			   "at least 2", pyr->num_levels);
		return SIFT3D_FAILURE;
	}
	// Free all previous data, if any, and reset to the empty state
	cleanup_GSS_filters(gss);
	init_GSS_filters(gss);
	// Copy pyramid parameters
	gss->num_filters = num_filters;
	gss->first_level = first_level;
	// Allocate the filter array (num_filters cannot be zero).
	// gauss_octave is NULL here, so this realloc acts as a malloc.
	if ((gss->gauss_octave = (Gauss_filter *)
	     SIFT3D_safe_realloc(gss->gauss_octave,
				 num_filters * sizeof(Gauss_filter))) == NULL)
		return SIFT3D_FAILURE;
	// Make the filter for the very first blur: from the nominal scale
	// sigma_n to the scale of the pyramid's first level
	next = SIFT3D_PYR_IM_GET(pyr, pyr->first_octave, first_level);
	if (init_Gauss_incremental_filter(&gss->first_gauss, pyr->sigma_n,
					  next->s, dim))
		return SIFT3D_FAILURE;
	// Make one octave of filters (num_levels - 1). Scales double per
	// octave, so the same filters apply to every octave.
	o = pyr->first_octave;
	for (s = first_level; s < last_level; s++) {
		cur = SIFT3D_PYR_IM_GET(pyr, o, s);
		next = SIFT3D_PYR_IM_GET(pyr, o, s + 1);
		if (init_Gauss_incremental_filter(SIFT3D_GAUSS_GET(gss, s),
						  cur->s, next->s, dim))
			return SIFT3D_FAILURE;
	}
	return SIFT3D_SUCCESS;
}
/* Free all memory associated with the GSS filters. gss cannot be reused
* unless it is reinitialized. */
/* Free all memory associated with the GSS filters and reset the struct to
 * the empty state, so a repeated cleanup (or a later make_gss) is safe.
 * gss cannot be reused unless it is reinitialized. */
void cleanup_GSS_filters(GSS_filters * const gss)
{
	int i;
	const int num_filters = gss->num_filters;

	// We are done if gss has no filters
	if (num_filters < 1)
		return;

	// Free the first filter
	cleanup_Gauss_filter(&gss->first_gauss);

	// Free the octave filters
	for (i = 0; i < num_filters; i++) {
		Gauss_filter *const g = gss->gauss_octave + i;
		cleanup_Gauss_filter(g);
	}

	// Free the octave filter buffer
	free(gss->gauss_octave);

	// Bug fix: mark the struct empty, so a second cleanup call does not
	// double-free the dangling gauss_octave pointer
	gss->gauss_octave = NULL;
	gss->num_filters = -1;
}
/* Initialize a Pyramid for use. Must be called before a Pyramid can be used
* in any other functions. */
/* Initialize a Pyramid for use. Must be called before a Pyramid can be used
 * in any other functions. Does not allocate memory. */
void init_Pyramid(Pyramid * const pyr)
{
	// No levels allocated yet
	pyr->levels = NULL;
	pyr->num_levels = pyr->num_kp_levels = 0;
	pyr->num_octaves = 0;
	// Start indexing at the base octave and level
	pyr->first_level = 0;
	pyr->first_octave = 0;
	// Scale parameters are set later by set_scales_Pyramid
	pyr->sigma0 = pyr->sigma_n = 0.0;
}
/* Resize a scale-space pyramid according to the size of base image im.
*
* Parameters:
* -im: An image with the desired dimensions and units at octave 0
* -first_level: The index of the first pyramid level per octave
* -num_kp_levels: The number of levels per octave in which keypoints are
* detected
* -num_levels: The total number of levels. Must be greater than or equal to
* num_kp_levels.
* -first_octave: The index of the first octave (0 is the base)
* -num_octaves: The total number of octaves
* -sigma0: The scale parameter of level 0, octave 0
* -sigma_n: The nominal scale of the image im.
* -pyr: The Pyramid to be resized.
*
* Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
/* Resize a scale-space pyramid according to the size of base image im. See
 * the parameter list in the comment above.
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int resize_Pyramid(const Image *const im, const int first_level,
	const unsigned int num_kp_levels, const unsigned int num_levels,
	const int first_octave, const unsigned int num_octaves,
	Pyramid *const pyr) {

	double units[IM_NDIMS];
	int dims[IM_NDIMS];
	double factor;
	int i, o, s;
	const int old_num_total_levels = pyr->num_levels * pyr->num_octaves;
	const int num_total_levels = num_levels * num_octaves;

	// Verify inputs. (Fixed: num_kp_levels is unsigned, so it takes %u.
	// Also removed two unused locals caching pyr->sigma0/sigma_n.)
	if (num_levels < num_kp_levels) {
		SIFT3D_ERR("resize_Pyramid: num_levels (%u) < "
			   "num_kp_levels (%u)", num_levels, num_kp_levels);
		return SIFT3D_FAILURE;
	}
	// Store the new parameters
	pyr->first_level = first_level;
	pyr->num_kp_levels = num_kp_levels;
	pyr->first_octave = first_octave;
	pyr->num_octaves = num_octaves;
	pyr->num_levels = num_levels;

	// Clean up old levels which are no longer needed
	for (i = num_total_levels; i < old_num_total_levels; i++) {
		Image *const level = pyr->levels + i;
		im_free(level);
	}

	// Resize the outer array
	if (num_total_levels != 0 &&
	    ((pyr->levels = SIFT3D_safe_realloc(pyr->levels,
		num_total_levels * sizeof(Image))) == NULL))
		return SIFT3D_FAILURE;

	// We have nothing more to do if there are no levels
	if (num_total_levels == 0)
		return SIFT3D_SUCCESS;

	// Initalize new levels
	for (i = old_num_total_levels; i < num_total_levels; i++) {
		Image *const level = pyr->levels + i;
		init_im(level);
	}

	// We have nothing more to do if the image is empty
	if (im->data == NULL)
		return SIFT3D_SUCCESS;

	// Calculate base image dimensions and units at the first octave
	factor = pow(2.0, -first_octave);
	for (i = 0; i < IM_NDIMS; i++) {
		dims[i] = (int) ((double) SIFT3D_IM_GET_DIMS(im)[i] * factor);
		units[i] = SIFT3D_IM_GET_UNITS(im)[i] * factor;
	}

	// Initialize each level separately
	SIFT3D_PYR_LOOP_START(pyr, o, s)
		// Initialize Image fields
		Image *const level = SIFT3D_PYR_IM_GET(pyr, o, s);
		memcpy(SIFT3D_IM_GET_DIMS(level), dims,
		       IM_NDIMS * sizeof(int));
		memcpy(SIFT3D_IM_GET_UNITS(level), units,
		       IM_NDIMS * sizeof(double));
		level->nc = im->nc;
		im_default_stride(level);
		// Re-size data memory
		if (im_resize(level))
			return SIFT3D_FAILURE;
	SIFT3D_PYR_LOOP_SCALE_END
	// Halve the dimensions and double the units for the next octave
	for (i = 0; i < IM_NDIMS; i++) {
		dims[i] /= 2;
		units[i] *= 2;
	}
	SIFT3D_PYR_LOOP_OCTAVE_END

	// Set the scales for the new levels
	return set_scales_Pyramid(pyr->sigma0, pyr->sigma_n, pyr);
}
/* Set the scale-space parameters on a Pyramid struct. Operates on all levels
* of the pyramid. This function is called automatically by resize_Pyramid.
*
* Parameters:
* -sigma0: The scale parameter of level 0, octave 0
* -sigma_n: The nominal scale parameter of images being transfomed into
* this pyramid struct.
* -Pyr: The Pyramid to be modified. */
/* Set the scale-space parameters on a Pyramid struct. See the comment above.
 * (Removed an unused local that cached the first level's Image pointer.) */
int set_scales_Pyramid(const double sigma0, const double sigma_n,
	Pyramid *const pyr) {

	int o, s;
	const int num_kp_levels = pyr->num_kp_levels;

	// Compute the scales of each level
	SIFT3D_PYR_LOOP_START(pyr, o, s)
		// The scale doubles each octave, subdivided geometrically
		// into num_kp_levels keypoint levels
		Image *const level = SIFT3D_PYR_IM_GET(pyr, o, s);
		const double scale =
		    sigma0 * pow(2.0, o + (double) s / num_kp_levels);

		// Verify that sigma_n does not exceed the smallest scale
		if (o == pyr->first_octave && s == pyr->first_level &&
		    scale < sigma_n) {
			SIFT3D_ERR("set_scales_Pyramid: sigma_n too large "
				   "for these settings. Max allowed: %f \n",
				   scale - DBL_EPSILON);
			return SIFT3D_FAILURE;
		}
		// Save the scale
		level->s = scale;
	SIFT3D_PYR_LOOP_END

	// Store the parameters
	pyr->sigma0 = sigma0;
	pyr->sigma_n = sigma_n;

	return SIFT3D_SUCCESS;
}
/* Make a deep copy of a pyramid. */
/* Make a deep copy of a pyramid: scale parameters, level geometry, and pixel
 * data. Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int copy_Pyramid(const Pyramid * const src, Pyramid * const dst)
{
	Image dummy;		// Empty stand-in base when src has no levels
	const Image *base;
	int o, s, have_levels;

	// Initialize intermediates
	init_im(&dummy);

	// Set the scale parameters
	if (set_scales_Pyramid(src->sigma0, src->sigma_n, dst))
		return SIFT3D_FAILURE;

	// Get the base image, substituting the empty dummy if src has none
	if (src->levels == NULL || src->num_octaves <= 0 ||
	    src->num_levels <= 0) {
		base = &dummy;
		have_levels = SIFT3D_FALSE;
	} else {
		base = src->levels;
		have_levels = SIFT3D_TRUE;
	}

	// Resize dst to match src's geometry
	if (resize_Pyramid(base, src->first_level, src->num_kp_levels,
		src->num_levels, src->first_octave, src->num_octaves, dst))
		goto copy_Pyramid_failure;

	// We are done if src has no levels
	if (!have_levels)
		goto copy_Pyramid_success;

	// Copy the pixel data of each level. (Bug fix: the previous version
	// returned directly on failure here, leaking dummy.)
	SIFT3D_PYR_LOOP_START(dst, o, s)
		const Image *const src_level = SIFT3D_PYR_IM_GET(src, o, s);
		Image *const dst_level = SIFT3D_PYR_IM_GET(dst, o, s);
		if (src_level->data != NULL &&
		    im_copy_data(src_level, dst_level))
			goto copy_Pyramid_failure;
	SIFT3D_PYR_LOOP_END

copy_Pyramid_success:
	im_free(&dummy);
	return SIFT3D_SUCCESS;

copy_Pyramid_failure:
	im_free(&dummy);
	return SIFT3D_FAILURE;
}
/* Release all memory associated with a Pyramid. pyr cannot be used again,
* unless it is reinitialized. */
/* Release all memory associated with a Pyramid and mark it as freed. pyr
 * cannot be used again, unless it is reinitialized. */
void cleanup_Pyramid(Pyramid * const pyr)
{
	int o, s;

	// We are done if there are no levels
	if (pyr->levels == NULL)
		return;

	// Free the levels
	SIFT3D_PYR_LOOP_START(pyr, o, s)
		Image *const level = SIFT3D_PYR_IM_GET(pyr, o, s);
		im_free(level);
	SIFT3D_PYR_LOOP_END

	// Free the pyramid level buffer
	free(pyr->levels);

	// Bug fix: NULL the pointer so the guard above makes a second
	// cleanup call a no-op rather than a double free
	pyr->levels = NULL;
}
/* Initialize a Slab for first use. Sets the buffer pointer to NULL and all
 * counters to zero. */
void init_Slab(Slab *const slab) {
        slab->buf = NULL;
        slab->num = 0;
        slab->buf_size = 0;
}
/* Free all memory associated with a slab. Slab cannot be re-used after
 * calling this function, unless re-initialized. */
void cleanup_Slab(Slab * const slab)
{
        // free(NULL) is a no-op, so no guard is needed
        free(slab->buf);

        // Clear the pointer to guard against double-free / use-after-free
        slab->buf = NULL;
}
/* Write the levels of a pyramid to separate files
 * for debugging. The path is prepended to the
 * octave and scale number of each image.
 *
 * File type is inferred from the extension in path.
 *
 * Supported file formats:
 * -NIFTI
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int write_pyramid(const char *path, Pyramid * pyr)
{
        char path_appended[1024];
        int o, s;

        // Validate or create the output directory
        if (mkpath(path, out_mode))
                return SIFT3D_FAILURE;

        // Save each image in a separate file
        SIFT3D_PYR_LOOP_START(pyr, o, s)
                // BUG FIX: the original used sprintf, which can overflow
                // path_appended for long paths. Fail on truncation instead.
                if (snprintf(path_appended, sizeof(path_appended),
                        "%s_o%i_s%i", path, o, s) >=
                        (int) sizeof(path_appended))
                        return SIFT3D_FAILURE;
                if (write_nii(path_appended, SIFT3D_PYR_IM_GET(pyr, o, s)))
                        return SIFT3D_FAILURE;
        SIFT3D_PYR_LOOP_END

        return SIFT3D_SUCCESS;
}
/* Print an error message and exit with status 1.
 * NOTE(review): the old comment said "to stdout", but the message goes
 * through SIFT3D_ERR, which appears to be an error-stream macro — confirm. */
void err_exit(const char *str)
{
        SIFT3D_ERR("Error! Exiting at %s \n", str);
        exit(1);
}
/* Read a whole ASCII file into a NUL-terminated string. Returns NULL
 * on error. The caller must free the returned buffer. */
SIFT3D_IGNORE_UNUSED
static char *read_file(const char *path)
{
        FILE *file;
        char *buf;
        long len;

        if ((file = fopen(path, "r")) == NULL) {
                return NULL;
        }

        // Measure the file length. BUG FIX: the original never closed the
        // file and ignored ftell/fread failures.
        if (fseek(file, 0, SEEK_END) != 0 || (len = ftell(file)) < 0) {
                fclose(file);
                return NULL;
        }
        rewind(file);

        // Allocate one extra byte for the terminator. BUG FIX: the original
        // returned an unterminated buffer despite the "string" contract.
        if ((buf = malloc((size_t) len + 1)) == NULL) {
                fclose(file);
                return NULL;
        }

        if (fread(buf, sizeof(char), (size_t) len, file) != (size_t) len ||
            ferror(file)) {
                free(buf);
                fclose(file);
                return NULL;
        }

        fclose(file);
        buf[len] = '\0';
        return buf;
}
/* Ensure all directories in the given path exist.
 * Thanks to Jonathan Leffler
 * Modifications: Ignore everything after the last '/'
 *
 * Returns 0 on success, nonzero on failure. */
static int mkpath(const char *path, mode_t mode)
{
        char *pp, *sp, *copypath;
        int status;

        // BUG FIX: the original set status = -1 here but then fell through
        // and dereferenced the NULL copypath. Fail immediately instead.
        if ((copypath = strndup(path, FILENAME_MAX)) == NULL)
                return -1;

        /* Ignore everything after the last '/' */
        if ((sp = strrchr(copypath, '/')) != NULL) {
                *sp = '\0';
        } else {
                /* If there is no '/', we have nothing to do */
                free(copypath);
                return SIFT3D_SUCCESS;
        }

        // Create each intermediate directory in turn
        status = 0;
        pp = copypath;
        while (status == 0 && (sp = strchr(pp, '/')) != NULL) {
                if (sp != pp) {
                        /* Neither root nor double slash in path */
                        *sp = '\0';
                        status = do_mkdir(copypath, mode);
                        *sp = '/';
                }
                pp = sp + 1;
        }

        // Create the final directory component
        if (status == 0)
                status = do_mkdir(copypath, mode);

        free(copypath);
        return (status);
}
/* Make a directory if it does not exist.
 * Thanks to Jonathan Leffler
 *
 * Returns 0 on success, -1 on failure (with errno set). */
static int do_mkdir(const char *path, mode_t mode)
{
        struct stat st;

        if (stat(path, &st) == 0) {
                /* Path exists: succeed only if it is already a directory */
                if (S_ISDIR(st.st_mode))
                        return 0;
                errno = ENOTDIR;
                return -1;
        }

        /* Directory does not exist. Tolerate EEXIST for race conditions. */
        if (cross_mkdir(path, mode) != 0 && errno != EEXIST)
                return -1;

        return 0;
}
/* Cross-platform mkdir wrapper. Returns the result of the underlying call
 * (0 on success, nonzero on failure). The mode argument is unused on the
 * Windows variants, which do not take POSIX permission bits. */
static int cross_mkdir(const char *path, mode_t mode) {
#ifdef _MINGW_WINDOWS
        // MinGW's mkdir takes only the path
        return mkdir(path);
#elif defined( _WINDOWS )
        // MSVC spelling
        return _mkdir(path);
#else
        // POSIX
        return mkdir(path, mode);
#endif
}
/* Initialize a Tps struct. This initializes
 * all fields, and allocates memory for the inner
 * matrix, initializing it to zero.
 *
 * Parameters:
 *   tps - The struct to initialize.
 *   dim - The dimensionality of the transform (must be >= 2).
 *   terms - The number of spline terms.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int init_Tps(Tps * tps, int dim, int terms)
{
        // Verify inputs
        if (dim < 2)
                return SIFT3D_FAILURE;

        // Initialize the type
        tps->tform.type = TPS;

        // Initialize the vtable
        tps->tform.vtable = &Tps_vtable;

        // Initialize the matrices
        if (init_Mat_rm(&tps->params, dim, terms, SIFT3D_DOUBLE, SIFT3D_TRUE))
                return SIFT3D_FAILURE;

        if (init_Mat_rm(&tps->kp_src, terms - dim - 1, dim,
                SIFT3D_DOUBLE, SIFT3D_TRUE)) {
                // BUG FIX: release params on partial failure (was leaked)
                cleanup_Mat_rm(&tps->params);
                return SIFT3D_FAILURE;
        }

        tps->dim = dim;
        return SIFT3D_SUCCESS;
}
/* Initialize a RANSAC struct, filling in the library default parameters. */
void init_Ransac(Ransac *const ran)
{
        ran->num_iter = SIFT3D_num_iter_default;
        ran->err_thresh = SIFT3D_err_thresh_default;
}
/* Set the err_thresh parameter in a Ransac struct, checking for validity.
 * A negative threshold is rejected. Returns SIFT3D_SUCCESS on success,
 * SIFT3D_FAILURE on invalid input. */
int set_err_thresh_Ransac(Ransac *const ran, double err_thresh)
{
        const int valid = !(err_thresh < 0.0);

        if (!valid) {
                SIFT3D_ERR("set_err_thresh_Ransac: invalid error "
                           "threshold: %f \n", err_thresh);
                return SIFT3D_FAILURE;
        }

        ran->err_thresh = err_thresh;
        return SIFT3D_SUCCESS;
}
/* Set the num_iter parameter in a Ransac struct. At least one iteration is
 * required. Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE on invalid
 * input. */
int set_num_iter_Ransac(Ransac *const ran, int num_iter)
{
        if (num_iter >= 1) {
                ran->num_iter = num_iter;
                return SIFT3D_SUCCESS;
        }

        SIFT3D_ERR("set_num_iter_Ransac: invalid number of "
                   "iterations: %d \n", num_iter);
        return SIFT3D_FAILURE;
}
/* Copy a Ransac struct from src to dst, routing each field through its
 * validating setter. Returns nonzero if either setter fails. */
int copy_Ransac(const Ransac *const src, Ransac *const dst) {

        if (set_num_iter_Ransac(dst, src->num_iter))
                return SIFT3D_FAILURE;

        return set_err_thresh_Ransac(dst, src->err_thresh);
}
/* Returns an array of k integers, (uniformly) randomly chosen from the
 * integers 0 through n - 1.
 *
 * The value of *ret must either be NULL, or a pointer to a previously
 * allocated block. On successful return, *ret contains the k random integers.
 * On failure, *ret is freed and set to NULL.
 *
 * Returns SIFT3D_SUCCESS on succes, SIFT3D_FAILURE otherwise. */
static int n_choose_k(const int n, const int k, int **ret) {

        int *ints;
        int i;

        // Verify inputs
        if (n < k || k < 1)
                goto n_choose_k_fail;

        // Resize the caller's buffer to n elements. BUG FIX: the original
        // called malloc here, leaking any previously allocated *ret.
        if ((ints = SIFT3D_safe_realloc(*ret, n * sizeof(int))) == NULL)
                goto n_choose_k_fail;
        *ret = ints;

        // Initialize the array of indices
        for (i = 0; i < n; i++) {
                ints[i] = i;
        }

        // Randomize the first k indices using Knuth shuffles
        for (i = 0; i < k; i++) {
                const int temp = ints[i];
                const int rand_idx = i + rand() % (n - i);
                ints[i] = ints[rand_idx];
                ints[rand_idx] = temp;
        }

        // Release unused memory
        if ((*ret = SIFT3D_safe_realloc(*ret, k * sizeof(int))) == NULL)
                goto n_choose_k_fail;

        return SIFT3D_SUCCESS;

n_choose_k_fail:
        if (*ret != NULL) {
                free(*ret);
                *ret = NULL;
        }
        return SIFT3D_FAILURE;
}
/* Build the thin-plate-spline system matrix (src_in) and the control-point
 * matrix (sp_src) from the K_terms points of src selected by the index
 * array r. dim selects 2D or 3D points.
 *
 * NOTE(review): x, y, z are only assigned when dim is 2 or 3; any other dim
 * would read them uninitialized — presumably callers guarantee dim in {2,3}.
 * NOTE(review): when two selected points coincide, r_sq == 0 and
 * U = 0 * log(0) evaluates to NaN — confirm whether callers exclude
 * duplicate points. */
SIFT3D_IGNORE_UNUSED
static int make_spline_matrix(Mat_rm * src, Mat_rm * src_in, Mat_rm * sp_src,
        int K_terms, int *r, int dim)
{
        int i, d;
        double x, y, z, x2, y2, z2, r_sq, U;

        src_in->type = SIFT3D_DOUBLE;
        sp_src->type = SIFT3D_DOUBLE;

        // The full system is (K + dim + 1) x (K + dim + 1)
        if (init_Mat_rm
            (src_in, K_terms + dim + 1, K_terms + dim + 1, SIFT3D_DOUBLE,
             SIFT3D_TRUE)) {
                return SIFT3D_FAILURE;
        }

        if (init_Mat_rm(sp_src, K_terms, dim, SIFT3D_DOUBLE, SIFT3D_TRUE)) {
                return SIFT3D_FAILURE;
        }

        for (i = 0; i < K_terms; i++) {
                //get the coordinate of current point
                switch (dim) {
                case 2:
                        x = SIFT3D_MAT_RM_GET(src, r[i], 0, double);
                        y = SIFT3D_MAT_RM_GET(src, r[i], 1, double);
                        break;
                case 3:
                        x = SIFT3D_MAT_RM_GET(src, r[i], 0, double);
                        y = SIFT3D_MAT_RM_GET(src, r[i], 1, double);
                        z = SIFT3D_MAT_RM_GET(src, r[i], 2, double);
                        break;
                }

                for (d = 0; d < i; d++) {
                        //compute the squared distance r_sq to point d
                        switch (dim) {
                        case 2:
                                x2 = SIFT3D_MAT_RM_GET(src, r[d], 0, double);
                                y2 = SIFT3D_MAT_RM_GET(src, r[d], 1, double);
                                r_sq =
                                    (x - x2) * (x - x2) + (y - y2) * (y - y2);
                                break;
                        case 3:
                                x2 = SIFT3D_MAT_RM_GET(src, r[d], 0, double);
                                y2 = SIFT3D_MAT_RM_GET(src, r[d], 1, double);
                                z2 = SIFT3D_MAT_RM_GET(src, r[d], 2, double);
                                r_sq =
                                    (x - x2) * (x - x2) + (y - y2) * (y - y2) +
                                    (z - z2) * (z - z2);
                                break;
                        }

                        //compute the radial basis value U
                        U = r_sq * log(r_sq);

                        //construct K (symmetric, so fill both triangles)
                        SIFT3D_MAT_RM_GET(src_in, i, d, double) = U;
                        SIFT3D_MAT_RM_GET(src_in, d, i, double) = U;
                }

                // Zero diagonal of K
                SIFT3D_MAT_RM_GET(src_in, i, i, double) = 0.0;

                //construct P and P'
                SIFT3D_MAT_RM_GET(src_in, i, K_terms, double) = 1.0;
                SIFT3D_MAT_RM_GET(src_in, K_terms, i, double) = 1.0;

                switch (dim) {
                case 2:
                        SIFT3D_MAT_RM_GET(src_in, i, K_terms + 1, double) = x;
                        SIFT3D_MAT_RM_GET(src_in, i, K_terms + 2, double) = y;
                        SIFT3D_MAT_RM_GET(src_in, K_terms + 1, i, double) = x;
                        SIFT3D_MAT_RM_GET(src_in, K_terms + 2, i, double) = y;
                        break;
                case 3:
                        SIFT3D_MAT_RM_GET(src_in, i, K_terms + 1, double) = x;
                        SIFT3D_MAT_RM_GET(src_in, i, K_terms + 2, double) = y;
                        SIFT3D_MAT_RM_GET(src_in, i, K_terms + 3, double) = z;
                        SIFT3D_MAT_RM_GET(src_in, K_terms + 1, i, double) = x;
                        SIFT3D_MAT_RM_GET(src_in, K_terms + 2, i, double) = y;
                        SIFT3D_MAT_RM_GET(src_in, K_terms + 3, i, double) = z;
                        break;
                }

                //construct sp_src matrix(matrix that stores control points)
                switch (dim) {
                case 2:
                        SIFT3D_MAT_RM_GET(sp_src, i, 0, double) = x;
                        SIFT3D_MAT_RM_GET(sp_src, i, 1, double) = y;
                        break;
                case 3:
                        SIFT3D_MAT_RM_GET(sp_src, i, 0, double) = x;
                        SIFT3D_MAT_RM_GET(sp_src, i, 1, double) = y;
                        SIFT3D_MAT_RM_GET(sp_src, i, 2, double) = z;
                        break;
                }
        }

        //construct the zero block O in the lower-right corner
        for (i = 0; i < dim; i++) {
                for (d = 0; d < dim; d++) {
                        SIFT3D_MAT_RM_GET(src_in, K_terms + i, K_terms + d,
                                          double) = 0.0;
                }
        }

        return SIFT3D_SUCCESS;
}
/* Build the affine system matrix from a set of points: each row of mat_out
 * is a point of pts_in followed by a trailing 1 (homogeneous coordinate).
 * mat_out is resized to [num_rows x (dim + 1)].
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
static int make_affine_matrix(const Mat_rm *const pts_in, const int dim,
        Mat_rm *const mat_out)
{
        const int num_rows = pts_in->num_rows;
        int row, col;

        mat_out->type = SIFT3D_DOUBLE;
        mat_out->num_rows = num_rows;
        mat_out->num_cols = dim + 1;
        if (resize_Mat_rm(mat_out))
                return SIFT3D_FAILURE;

        for (row = 0; row < num_rows; row++) {

                // Copy the point coordinates into this row
                for (col = 0; col < dim; col++) {
                        SIFT3D_MAT_RM_GET(mat_out, row, col, double) =
                            SIFT3D_MAT_RM_GET(pts_in, row, col, double);
                }

                // Trailing homogeneous coordinate
                SIFT3D_MAT_RM_GET(mat_out, row, dim, double) = 1.0;
        }

        return SIFT3D_SUCCESS;
}
/* Extract the control point matrix from a tform struct (only valid for
 * spline transforms). Returns NULL for transform types which have no
 * control points. */
SIFT3D_IGNORE_UNUSED
static Mat_rm *extract_ctrl_pts(void *tform, tform_type type)
{
        Tps *tps = (Tps *) tform;

        switch (type) {
        case TPS:
                return extract_ctrl_pts_Tps(tps);
        case AFFINE:
                // BUG FIX: the original broke out of the switch and returned
                // an uninitialized pointer here (undefined behavior).
                return NULL;
        default:
                return NULL;
        }
}
/* Return the control point matrix stored inside a Tps struct. The pointer
 * aliases tps and must not be freed by the caller. */
static Mat_rm *extract_ctrl_pts_Tps(Tps * tps)
{
        return &tps->kp_src;
}
/* Solve for a transformation struct.
 *
 * Paramters:
 *   src - See ransac().
 *   ref - See ransac()
 *   tform - See ransac()
 *
 * Returns SIFT3D_SUCCESS, SIFT3D_SINGULAR, or SIFT3D_FAILURE. See ransac() for
 * interpretation. */
static int solve_system(const Mat_rm *const src, const Mat_rm *const ref,
        void *const tform)
{
        const tform_type type = tform_get_type(tform);

        //Mat_rm *kp_ref;
        Mat_rm ref_sys, X;
        int dim, ret;

        // Intermediate matrices; freed on every exit path below
        init_Mat_rm(&ref_sys, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE);
        init_Mat_rm(&X, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE);

        //construct source matrix and initialize reference vector
        switch (type) {
        case TPS:
                //kp_ref = extract_ctrl_pts(tform, type);
                // make_spline_matrix(ref, &ref_in, kp_ref, num_pts, r, dim);
                puts("solve_system: TPS not yet implemented");
                goto SOLVE_SYSTEM_FAIL;
        case AFFINE:
                dim = AFFINE_GET_DIM((Affine *const) tform);
                make_affine_matrix(ref, dim, &ref_sys);
                break;
        default:
                puts("solve_system: unknown type");
                goto SOLVE_SYSTEM_FAIL;
        }

        // Solve for the coefficients: direct solve when the system is
        // square, least-squares otherwise
        ret = ref_sys.num_rows == ref_sys.num_cols ?
            solve_Mat_rm(&ref_sys, src, -1.0, &X) :
            solve_Mat_rm_ls(&ref_sys, src, &X);

        switch (ret) {
        case SIFT3D_SUCCESS:
                break;
        case SIFT3D_SINGULAR:
                goto SOLVE_SYSTEM_SINGULAR;
        default:
                goto SOLVE_SYSTEM_FAIL;
        }

        // Save the transformation matrix
        switch (type) {
        case TPS:
                //TODO
                goto SOLVE_SYSTEM_FAIL;
        case AFFINE:
        {
                Mat_rm X_trans;

                init_Mat_rm(&X_trans, 0, 0, SIFT3D_DOUBLE, SIFT3D_FALSE);

                // The solution X is transposed relative to the affine matrix
                ret = transpose_Mat_rm(&X, &X_trans) ||
                    Affine_set_mat(&X_trans, (Affine *) tform);

                cleanup_Mat_rm(&X_trans);

                if (ret)
                        goto SOLVE_SYSTEM_FAIL;

                break;
        }
        default:
                goto SOLVE_SYSTEM_FAIL;
        }

        // Clean up
        cleanup_Mat_rm(&ref_sys);
        cleanup_Mat_rm(&X);

        return SIFT3D_SUCCESS;

SOLVE_SYSTEM_SINGULAR:
        cleanup_Mat_rm(&ref_sys);
        cleanup_Mat_rm(&X);
        return SIFT3D_SINGULAR;

SOLVE_SYSTEM_FAIL:
        cleanup_Mat_rm(&ref_sys);
        cleanup_Mat_rm(&X);
        return SIFT3D_FAILURE;
}
/* Find the squared distance (SSD error) for the i'th point: transform the
 * i'th row of ref and compare it to the i'th row of src.
 * NOTE(review): the local comments name ref as the "source" input and src as
 * the "reference" — the transform maps ref-space points onto src-space
 * points here; confirm against ransac()'s conventions. */
static double tform_err_sq(const void *const tform, const Mat_rm *const src,
        const Mat_rm *const ref, const int i)
{
        double err = 0.0;

        //Initialization
        //in -- inputs coordinates of source points
        //out -- registered points
        //r -- reference points (ground truth)
        double x_in, y_in, z_in, x_r, y_r, z_r, x_out, y_out, z_out;

        //Find the source point (row i of ref)
        x_in = SIFT3D_MAT_RM_GET(ref, i, 0, double);
        y_in = SIFT3D_MAT_RM_GET(ref, i, 1, double);
        z_in = SIFT3D_MAT_RM_GET(ref, i, 2, double);

        //Register (apply the transform)
        apply_tform_xyz(tform, x_in, y_in, z_in, &x_out, &y_out, &z_out);

        //Find the reference point (row i of src)
        x_r = SIFT3D_MAT_RM_GET(src, i, 0, double);
        y_r = SIFT3D_MAT_RM_GET(src, i, 1, double);
        z_r = SIFT3D_MAT_RM_GET(src, i, 2, double);

        //Find the SSD error
        err = (x_r - x_out) * (x_r - x_out) + (y_r - y_out) * (y_r - y_out) +
            (z_r - z_out) * (z_r - z_out);

        //return the result
        return err;
}
/* Perform one iteration of RANSAC.
 *
 * Parameters:
 *   src - The source points.
 *   ref - The reference points.
 *   tform - The output transformation. Must be initialized.
 *   cset - An array in which to store the concensus set. The value *cset must
 *          either be NULL, or a pointer to a previously allocated block.
 *   len - A location in which to store the length of the cset.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_SINGULAR if the system is
 * near singular, and SIFT3D_FAILURE otherwise. */
static int ransac(const Mat_rm *const src, const Mat_rm *const ref,
        const Ransac *const ran, void *tform, int **const cset, int *const len)
{
        int *rand_indices;
        Mat_rm src_rand, ref_rand;
        int i, j, num_rand, cset_len;

        const double err_thresh = ran->err_thresh;
        const double err_thresh_sq = err_thresh * err_thresh;
        const int num_pts = src->num_rows;
        const int num_dim = src->num_cols;
        const tform_type type = tform_get_type(tform);

        // Verify inputs
        if (src->type != SIFT3D_DOUBLE || src->type != ref->type) {
                puts("ransac: all matrices must have type double \n");
                return SIFT3D_FAILURE;
        }
        if (src->num_rows != ref->num_rows || src->num_cols != ref->num_cols) {
                puts("ransac: src and ref must have the same dimensions \n");
                return SIFT3D_FAILURE;
        }

        // Get the number of points to sample for this transform type
        switch (type) {
        case AFFINE:
                // An affine transform is determined by dim + 1 points
                num_rand = AFFINE_GET_DIM((Affine *const) tform) + 1;
                break;
        default:
                printf("ransac: unknown transformation type \n");
                return SIFT3D_FAILURE;
        }

        // Initialize intermediates; freed on every exit path below
        rand_indices = NULL;
        init_Mat_rm(&src_rand, num_rand, num_dim, SIFT3D_DOUBLE, SIFT3D_FALSE);
        init_Mat_rm(&ref_rand, num_rand, num_dim, SIFT3D_DOUBLE, SIFT3D_FALSE);

        // Draw random point indices without replacement
        if (n_choose_k(num_pts, num_rand, &rand_indices))
                goto RANSAC_FAIL;

        // Copy the randomly-chosen rows into the sample matrices
        SIFT3D_MAT_RM_LOOP_START(&src_rand, i, j)

                const int rand_idx = rand_indices[i];

                SIFT3D_MAT_RM_GET(&src_rand, i, j, double) =
                    SIFT3D_MAT_RM_GET(src, rand_idx, j, double);
                SIFT3D_MAT_RM_GET(&ref_rand, i, j, double) =
                    SIFT3D_MAT_RM_GET(ref, rand_idx, j, double);

        SIFT3D_MAT_RM_LOOP_END

        // Fit a transform to the random points
        switch (solve_system(&src_rand, &ref_rand, tform)) {
        case SIFT3D_SUCCESS:
                break;
        case SIFT3D_SINGULAR:
                goto RANSAC_SINGULAR;
        default:
                goto RANSAC_FAIL;
        }

        // Extract the consensus set
        cset_len = 0;
        for (i = 0; i < num_pts; i++) {

                // Calculate the error
                const double err_sq = tform_err_sq(tform, src, ref, i);

                // Reject points below the error threshold
                if (err_sq > err_thresh_sq)
                        continue;

                // Add to the consensus set (++cset_len cannot be zero)
                if ((*cset = SIFT3D_safe_realloc(*cset,
                        ++cset_len * sizeof(int))) == NULL)
                        goto RANSAC_FAIL;

                (*cset)[cset_len - 1] = i;
        }

        // Return the new length of cset
        *len = cset_len;

        if (rand_indices != NULL)
                free(rand_indices);
        cleanup_Mat_rm(&src_rand);
        cleanup_Mat_rm(&ref_rand);
        return SIFT3D_SUCCESS;

RANSAC_SINGULAR:
        if (rand_indices != NULL)
                free(rand_indices);
        cleanup_Mat_rm(&src_rand);
        cleanup_Mat_rm(&ref_rand);
        return SIFT3D_SINGULAR;

RANSAC_FAIL:
        if (rand_indices != NULL)
                free(rand_indices);
        cleanup_Mat_rm(&src_rand);
        cleanup_Mat_rm(&ref_rand);
        return SIFT3D_FAILURE;
}
/* Resize a spline (Tps) struct for num_pts selected points of the given
 * dimensionality. Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE
 * otherwise. */
int resize_Tps(Tps * tps, int num_pts, int dim)
{
        Mat_rm *const params = &tps->params;
        Mat_rm *const kp_src = &tps->kp_src;

        // params is [dim x (num_pts + dim + 1)], kp_src is [num_pts x dim]
        params->num_rows = dim;
        params->num_cols = num_pts + dim + 1;
        kp_src->num_rows = num_pts;
        kp_src->num_cols = dim;

        if (resize_Mat_rm(params) || resize_Mat_rm(kp_src))
                return SIFT3D_FAILURE;

        tps->dim = dim;
        return SIFT3D_SUCCESS;
}
/* Fit a transformation from ref to src points, using random sample concensus
 * (RANSAC).
 *
 * Parameters:
 *   ran - Struct storing RANSAC parameters.
 *   src - The [mxn] source points.
 *   ref - The [mxn] reference points.
 *   tform - The output transform. Must be initialized with init_from prior to
 *           calling this function.
 *
 * Returns SIFT3D_SUCCESS on success, SIFT3D_FAILURE otherwise. */
int find_tform_ransac(const Ransac *const ran, const Mat_rm *const src,
        const Mat_rm *const ref, void *const tform)
{
        Mat_rm ref_cset, src_cset;
        void *tform_cur;
        int *cset, *cset_best;
        int i, j, dim, num_terms, ret, len, len_best, min_num_inliers;

        const int num_iter = ran->num_iter;
        const int num_pts = src->num_rows;
        const size_t tform_size = tform_get_size(tform);
        const tform_type type = tform_get_type(tform);

        // Initialize data structures; all are released at find_tform_quit
        cset = cset_best = NULL;
        len_best = 0;
        if ((tform_cur = malloc(tform_size)) == NULL ||
            init_tform(tform_cur, type) ||
            init_Mat_rm(&src_cset, len_best, IM_NDIMS, SIFT3D_DOUBLE,
                        SIFT3D_FALSE) ||
            init_Mat_rm(&ref_cset, len_best, IM_NDIMS, SIFT3D_DOUBLE,
                        SIFT3D_FALSE))
                goto find_tform_quit;

        // initialize type-specific variables
        switch (type) {
        case AFFINE:
                dim = AFFINE_GET_DIM((Affine *const) tform);
                num_terms = dim + 1;
                min_num_inliers = 5;
                break;
        default:
                puts("find_tform_ransac: unsupported transformation "
                     "type \n");
                goto find_tform_quit;
        }

        if (num_pts < num_terms) {
                printf("Not enough matched points \n");
                goto find_tform_quit;
        }

        // Ransac iterations: keep the transform with the largest consensus
        for (i = 0; i < num_iter; i++) {
                // Retry on near-singular systems (new random sample each time)
                do {
                        ret = ransac(src, ref, ran, tform_cur, &cset, &len);
                } while (ret == SIFT3D_SINGULAR);

                if (ret == SIFT3D_FAILURE)
                        goto find_tform_quit;

                if (len > len_best) {
                        len_best = len;
                        if ((cset_best = (int *)SIFT3D_safe_realloc(cset_best,
                                len * sizeof(int))) == NULL ||
                            copy_tform(tform_cur, tform))
                                goto find_tform_quit;
                        memcpy(cset_best, cset, len * sizeof(int));
                }
        }

        // Check if the minimum number of inliers was found
        if (len_best < min_num_inliers) {
                puts("find_tform_ransac: No good model was found! \n");
                goto find_tform_quit;
        }

        // Resize the concensus set matrices
        src_cset.num_rows = ref_cset.num_rows = len_best;
        if (resize_Mat_rm(&src_cset) || resize_Mat_rm(&ref_cset))
                goto find_tform_quit;

        // Extract the concensus set into the matrices
        SIFT3D_MAT_RM_LOOP_START(&src_cset, i, j)

                const int idx = cset_best[i];

                SIFT3D_MAT_RM_GET(&src_cset, i, j, double) =
                    SIFT3D_MAT_RM_GET(src, idx, j, double);
                SIFT3D_MAT_RM_GET(&ref_cset, i, j, double) =
                    SIFT3D_MAT_RM_GET(ref, idx, j, double);

        SIFT3D_MAT_RM_LOOP_END

#ifdef SIFT3D_RANSAC_REFINE
        // Refine with least squares over the whole consensus set
        switch (solve_system(&src_cset, &ref_cset, tform_cur)) {
        case SIFT3D_SUCCESS:
                // Copy the refined transformation to the output
                if (copy_tform(tform_cur, tform))
                        goto find_tform_quit;
                break;
        case SIFT3D_SINGULAR:
                // Stick with the old transformation
#ifdef VERBOSE
                printf("find_tform_ransac: warning: least-squares refinement "
                       "abandoned due to numerical precision \n");
#endif
                break;
        default:
                goto find_tform_quit;
        }
#endif

        // Clean up
        free(cset);
        free(cset_best);
        cleanup_tform(tform_cur);
        cleanup_Mat_rm(&ref_cset);
        cleanup_Mat_rm(&src_cset);
        if (tform_cur != NULL)
                free(tform_cur);
        return SIFT3D_SUCCESS;

find_tform_quit:
        // Clean up and return an error
        if (cset != NULL)
                free(cset);
        if (cset_best != NULL)
                free(cset_best);
        cleanup_tform(tform_cur);
        if (tform_cur != NULL)
                free(tform_cur);
        cleanup_Mat_rm(&ref_cset);
        cleanup_Mat_rm(&src_cset);
        return SIFT3D_FAILURE;
}
/* Parse the GNU standard arguments (--version, --help). On return, the
 * getopt state is restored to the original.
 *
 * Return values:
 *  -SIFT3D_HELP - "--help" was found
 *  -SIFT3D_VERSION - "--version" was found, and the version message printed
 *  -SIFT3D_FALSE - no GNU standard arguments were found */
int parse_gnu(const int argc, char *const *argv)
{
        int c, ret;
        const int opterr_start = opterr;

        // Options
        const struct option longopts[] = {
                {"help", no_argument, NULL, SIFT3D_HELP},
                {"version", no_argument, NULL, SIFT3D_VERSION},
                {0, 0, 0, 0}
        };

        // Suppress getopt error messages while scanning
        opterr = 0;

        // Process the arguments
        ret = SIFT3D_FALSE;
        while ((c = getopt_long(argc, argv, "+", longopts, NULL)) != -1) {
                switch (c) {
                case SIFT3D_HELP:
                        ret = SIFT3D_HELP;
                        goto parse_gnu_done;
                case SIFT3D_VERSION:
                        puts(version_msg);
                        ret = SIFT3D_VERSION;
                        goto parse_gnu_done;
                }
        }

parse_gnu_done:
        // Restore the getopt state. BUG FIX: the original skipped this
        // restoration on the --help and --version early returns, violating
        // the documented contract.
        optind = 0;
        opterr = opterr_start;

        return ret;
}
/* Print the bug message to stderr. */
void print_bug_msg()
{
        // BUG FIX: pass the message as an argument, not as the format
        // string, so any '%' in bug_msg cannot be misinterpreted.
        SIFT3D_ERR("%s", bug_msg);
}
|
GB_binop__bget_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int32)
// C=scalar+B GB (_bind1st__bget_int32)
// C=scalar+B' GB (_bind1st_tran__bget_int32)
// C=A+scalar GB (_bind2nd__bget_int32)
// C=A'+scalar GB (_bind2nd_tran__bget_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Auto-generated code (see the file header); the body is expanded from the
// named template with the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Auto-generated code; returns GrB_NO_VALUE when this operator is disabled
// at compile time (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Auto-generated code.
GrB_Info GB (_Cdense_accumb__bget_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns. Harmless
    // generator artifact; left as-is since this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Auto-generated code; the body is expanded from GB_add_template.c.
GrB_Info GB (_AaddB__bget_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion: unpack the alpha and beta scalars (type int32_t)
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Auto-generated code; the body is expanded from GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bget_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Auto-generated code. GB_BINOP_FLIP is 1 for this operator (see the macro
// definitions above), so the flipxy branch below is the active one.
GrB_Info GB (_AemultB_02__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Auto-generated code; the body is expanded from GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Auto-generated code; the body is expanded from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bget_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Auto-generated code: Cx [p] = GB_BITGET (x, Bx [p]) for every entry
// present in B (per the Bb bitmap), in parallel.
GrB_Info GB (_bind1st__bget_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the B bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Auto-generated code: Cx [p] = GB_BITGET (Ax [p], y) for every entry
// present in A (per the Ab bitmap), in parallel.
GrB_Info GB (_bind2nd__bget_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the A bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \
}
// C = op (x, A'): the transpose loop lives in the included template, which
// expands the GB_CAST_OP macro (defined just above this function); that
// macro reads the local variable "x" bound below, so the name "x" must not
// be changed.
GrB_Info GB (_bind1st_tran__bget_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar; consumed by GB_CAST_OP inside the template
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // GB_ATYPE is re-defined here by the generator; harmless, same type
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \
}
// C = op (A', y): the transpose loop lives in the included template, which
// expands the GB_CAST_OP macro (defined just above this function); that
// macro reads the local variable "y" bound below, so the name "y" must not
// be changed.
GrB_Info GB (_bind2nd_tran__bget_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar; consumed by GB_CAST_OP inside the template
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__islt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint8)
// A*D function (colscale): GB (_AxD__islt_uint8)
// D*A function (rowscale): GB (_DxB__islt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint8)
// C=scalar+B GB (_bind1st__islt_uint8)
// C=scalar+B' GB (_bind1st_tran__islt_uint8)
// C=A+scalar GB (_bind2nd__islt_uint8)
// C=A'+scalar GB (_bind2nd_tran__islt_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled at generation time: per the comment below, the dense
// ewise3-accum kernel exists only for a fixed list of operators, and this
// file's operator is not one of them, so the generator emitted "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; this wrapper only selects the
// uint8_t / ISLT instantiation — the loop itself is in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense; the accumulation is implemented by the included
// template.  B_ek_slicing / B_ntasks / B_nthreads are the parallel slicing
// parameters consumed inside that template.
GrB_Info GB (_Cdense_accumB__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar; the accumulation is implemented
// by the included template, which reads the local "bwork" bound below.
//
// FIX: the generated code contained a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block that already returns;
// the dead statement has been removed.
GrB_Info GB (_Cdense_accumb__islt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, of type uint8_t
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D (column scale by a diagonal matrix D); the included template fills
// Cx, the values array of C, bound below.
GrB_Info GB (_AxD__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // values array of C, written by the template
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B (row scale by a diagonal matrix D); the included template fills
// Cx, the values array of C, bound below.
GrB_Info GB (_DxB__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // values array of C, written by the template
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked); the algorithm is in the included
// template.  The three GB_WERK_DECLARE workspaces are allocated inside the
// template as needed and released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__islt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // use only the structure of M
    const bool Mask_comp,               // use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is sparse/hyper; the
// algorithm is in the included meta-template.
GrB_Info GB (_AemultB_08__islt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // use only the structure of M
    const bool Mask_comp,               // use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For this file GB_BINOP_FLIP is 0 (defined above), so only the unflipped
// branch below is compiled; the flipxy parameter is then unused.
GrB_Info GB (_AemultB_02__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full;
// the algorithm is in the included template.
GrB_Info GB (_AemultB_04__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // sparse/hyper mask (required)
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is bitmap; the algorithm is
// in the included template.
GrB_Info GB (_AemultB_bitmap__islt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for all entries p of B, with the scalar x bound as
// the first operand.  Bb is consulted via GBB to skip entries absent from
// B's bitmap.
GrB_Info GB (_bind1st__islt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    const uint8_t xval = *((const uint8_t *) x_input) ;
    const uint8_t *Bx = (const uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // process only the entries present in B
        if (GBB (Bb, p))
        {
            const uint8_t bval = GBX (Bx, p, false) ;
            Cx [p] = (xval < bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for all entries p of A, with the scalar y bound as
// the second operand.  Ab is consulted via GBB to skip entries absent from
// A's bitmap.
GrB_Info GB (_bind2nd__islt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    const uint8_t *Ax = (const uint8_t *) Ax_input ;
    const uint8_t yval = *((const uint8_t *) y_input) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // process only the entries present in A
        if (GBB (Ab, p))
        {
            const uint8_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval < yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): the transpose loop lives in the included template, which
// expands the GB_CAST_OP macro (defined just above this function); that
// macro reads the local variable "x" bound below, so the name "x" must not
// be changed.
GrB_Info GB (_bind1st_tran__islt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar; consumed by GB_CAST_OP inside the template
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // GB_ATYPE is re-defined here by the generator; harmless, same type
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): the transpose loop lives in the included template, which
// expands the GB_CAST_OP macro (defined just above this function); that
// macro reads the local variable "y" bound below, so the name "y" must not
// be changed.
GrB_Info GB (_bind2nd_tran__islt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar; consumed by GB_CAST_OP inside the template
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * NOTE: *y is used as scratch space for the carry normalization and is
 * modified, exactly as in the classic GNU libc example this follows.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field when x has fewer microseconds than y. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }
  /* Carry the other way when the microsecond gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's (adjusted) seconds precede y's. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1, 3D 7-point variable-coefficient stencil.
 * Usage: prog [Nx Ny Nz [Nt]] — interior sizes; a 1-point halo is added on
 * each side.  Runs the stencil TESTS times and reports the best time.
 *
 * FIXES vs the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing (UB);
 *    they now default to a small problem.
 *  - "min" was used but only the MIN macro is defined; now uses MIN.
 *  - the init loops started at index 1 and only filled A[0], so the stencil
 *    read uninitialized halo planes and the never-written boundary of A[1];
 *    both time planes are now fully initialized.
 *  - the outer A/coef pointer arrays and tile_size leaked; now freed.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    // default interior of 32^3 and 10 timesteps when arguments are absent
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the two time planes A[0..1][Nz][Ny][Nx]
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2; m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // one coefficient array per stencil point (center + 6 neighbors)
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7; m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 16;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    // initialize BOTH time planes and the full arrays, halos included
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 7; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                            coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                            coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                            coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                            coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                            coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                            coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);   // was lowercase "min": undeclared
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return;

    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays, including the outer pointer arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
core_clacpy_band.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlacpy_band.c, normal z -> c, Fri Sep 28 17:38:19 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
/*******************************************************************************
*
* @ingroup core_plasma_complex32_t
*
* plasma_core_clacpy copies a sub-block A of a band matrix stored in LAPACK's band format
* to a corresponding sub-block B of a band matrix in PLASMA's band format
*
*******************************************************************************
*
* @param[in] it
* The row block index of the tile.
*
* @param[in] jt
* The column block index of the tile.
*
* @param[in] m
* The number of rows of the matrices A and B. M >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B. N >= 0.
*
* @param[in] A
* The M-by-N matrix to copy.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,M).
*
* @param[out] B
* The M-by-N copy of the matrix A.
* On exit, B = A ONLY in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,M).
*
******************************************************************************/
__attribute__((weak))
// Copy tile (it, jt) of a band matrix from LAPACK band storage A into
// PLASMA tile storage B, zeroing every position of B outside the band.
void plasma_core_clacpy_lapack2tile_band(plasma_enum_t uplo,
                                         int it, int jt,
                                         int m, int n, int nb, int kl, int ku,
                                         const plasma_complex32_t *A, int lda,
                                         plasma_complex32_t *B, int ldb)
{
    int i, j;
    int j_start, j_end;
    // Column range [j_start, j_end) that can contain band entries for this
    // tile; columns outside it are zeroed wholesale below.
    if (uplo == PlasmaGeneral) {
        j_start = 0; // pivot back and could fill in
        j_end = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
    }
    else if (uplo == PlasmaUpper) {
        j_start = 0;
        j_end = imin(n, (it-jt)*nb+m+ku+1);
    }
    else {
        // presumably PlasmaLower — TODO confirm against callers
        j_start = imax(0, (it-jt)*nb-kl);
        j_end = n;
    }
    // zero the columns before the band
    for (j = 0; j < j_start; j++) {
        for (i = 0; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
    for (j = j_start; j < j_end; j++) {
        int i_start, i_end;
        // Row range [i_start, i_end) of column j that lies inside the band.
        if (uplo == PlasmaGeneral) {
            i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
            i_end = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
            // +nb because we use cgetrf on panel and pivot back within the panel.
            // so the last tile in panel could fill.
        }
        else if (uplo == PlasmaUpper) {
            i_start = imax(0, (jt-it)*nb+j-ku);
            i_end = imin(m, (jt-it)*nb+j+1);
        }
        else {
            i_start = imax(0, (jt-it)*nb+j);
            i_end = imin(m, (jt-it)*nb+j+kl+1);
        }
        // zero above the band, copy the band, zero below the band
        for (i = 0; i < i_start; i++) {
            B[i + j*ldb] = 0.0;
        }
        for (i = i_start; i < i_end; i++) {
            B[i + j*ldb] = A[i + j*lda];
        }
        for (i = i_end; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
    // zero the columns after the band
    for (j = j_end; j < n; j++) {
        for (i = 0; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
}
/******************************************************************************/
// OpenMP-task wrapper: schedules the LAPACK-to-tile band copy as a task
// that reads the whole A panel and writes the whole B panel, so the
// runtime orders it against other tasks touching the same arrays.
void plasma_core_omp_clacpy_lapack2tile_band(plasma_enum_t uplo,
                                             int it, int jt,
                                             int m, int n, int nb, int kl, int ku,
                                             const plasma_complex32_t *A, int lda,
                                             plasma_complex32_t *B, int ldb)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:B[0:ldb*n])
    plasma_core_clacpy_lapack2tile_band(uplo,
                                        it, jt, m, n, nb, kl, ku,
                                        A, lda,
                                        B, ldb);
}
/*******************************************************************************
*
* @ingroup core_plasma_complex32_t
*
* plasma_core_clacpy copies all or part of a two-dimensional matrix A to another
* matrix B
*
*******************************************************************************
*
* @param[in] it
* The row block index of the tile.
*
* @param[in] jt
* The column block index of the tile.
*
* @param[in] m
* The number of rows of the matrices A and B. m >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B. n >= 0.
*
* @param[in] A
* The m-by-n matrix to copy.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1, m).
*
* @param[out] B
* The m-by-n copy of the matrix A.
* On exit, B = A ONLY in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1, m).
*
******************************************************************************/
__attribute__((weak))
// Copy tile (it, jt) of a band matrix from PLASMA tile storage B back into
// LAPACK band storage A.  Unlike the lapack2tile direction, positions of A
// outside the band are left untouched (no zeroing loops here).
void plasma_core_clacpy_tile2lapack_band(plasma_enum_t uplo,
                                         int it, int jt,
                                         int m, int n, int nb, int kl, int ku,
                                         const plasma_complex32_t *B, int ldb,
                                         plasma_complex32_t *A, int lda)
{
    int i, j;
    int j_start, j_end;
    // Column range [j_start, j_end) that can contain band entries for this
    // tile (same index arithmetic as the lapack2tile direction).
    if (uplo == PlasmaGeneral) {
        j_start = 0; // pivot back and could fill in
        j_end = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
    }
    else if (uplo == PlasmaUpper) {
        j_start = 0;
        j_end = imin(n, (it-jt)*nb+m+ku+1);
    }
    else {
        // presumably PlasmaLower — TODO confirm against callers
        j_start = imax(0, (it-jt)*nb-kl);
        j_end = n;
    }
    for (j = j_start; j < j_end; j++) {
        int i_start, i_end;
        // Row range [i_start, i_end) of column j that lies inside the band.
        if (uplo == PlasmaGeneral) {
            i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
            i_end = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
            // +nb because we use cgetrf on panel and pivot back within the panel.
            // so the last tile in panel could fill.
        }
        else if (uplo == PlasmaUpper) {
            i_start = imax(0, (jt-it)*nb+j-ku);
            i_end = imin(m, (jt-it)*nb+j+1);
        }
        else {
            i_start = imax(0, (jt-it)*nb+j);
            i_end = imin(m, (jt-it)*nb+j+kl+1);
        }
        // copy only the in-band rows of this column
        for (i = i_start; i < i_end; i++) {
            A[i + j*lda] = B[i + j*ldb];
        }
    }
}
/******************************************************************************/
// OpenMP-task wrapper: schedules the tile-to-LAPACK band copy as a task
// that reads the whole B panel and writes the whole A panel.
void plasma_core_omp_clacpy_tile2lapack_band(plasma_enum_t uplo,
                                             int it, int jt,
                                             int m, int n, int nb, int kl, int ku,
                                             const plasma_complex32_t *B, int ldb,
                                             plasma_complex32_t *A, int lda)
{
    #pragma omp task depend(in:B[0:ldb*n]) \
                     depend(out:A[0:lda*n])
    plasma_core_clacpy_tile2lapack_band(uplo,
                                        it, jt, m, n, nb, kl, ku,
                                        B, ldb,
                                        A, lda);
}
|
omp_sections.c | /******************************************************************************
* FILE: omp_workshare2.c
* DESCRIPTION:
* OpenMP Example - Sections Work-sharing - C Version
* In this example, the OpenMP SECTION directive is used to assign
* different array operations to each thread that executes a SECTION.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 07/16/07
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
/* OpenMP sections work-sharing demo: one section computes c = a + b, the
 * other computes d = a * b, each executed by a single thread.
 *
 * FIX: the "#pragma omp sections" / "#pragma omp section" directives the
 * file header describes were missing, leaving bare blocks, so EVERY thread
 * executed BOTH loops and raced on c[] and d[].  The work-sharing
 * construct is restored here, and main now returns a value. */
int main (int argc, char *argv[])
{
  int i, nthreads, tid;
  float a[N], b[N], c[N], d[N];

  /* Some initializations */
  for (i=0; i<N; i++) {
    a[i] = i * 1.5;
    b[i] = i + 22.35;
    c[i] = d[i] = 0.0;
  }

#pragma omp parallel shared(a,b,c,d,nthreads) private(i,tid)
  {
    tid = omp_get_thread_num();
    if (tid == 0)
      {
        nthreads = omp_get_num_threads();
        printf("Number of threads = %d\n", nthreads);
      }
    printf("Thread %d starting...\n",tid);

#pragma omp sections nowait
    {
#pragma omp section
      {
        /* section 1: element-wise sum */
        for (i=0; i<N; i++)
          {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
          }
      }
#pragma omp section
      {
        /* section 2: element-wise product */
        for (i=0; i<N; i++)
          {
            d[i] = a[i] * b[i];
            printf("Thread %d: d[%d]= %f\n",tid,i,d[i]);
          }
      }
    }  /* end of sections */

    printf("Thread %d done.\n",tid);
  }  /* end of parallel region */

  return 0;
}
|
imginputfileconn.h | /**
* DeepDetect
* Copyright (c) 2014 Emmanuel Benazera
* Author: Emmanuel Benazera <beniz@droidnik.fr>
*
* This file is part of deepdetect.
*
* deepdetect is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* deepdetect is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with deepdetect. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef IMGINPUTFILECONN_H
#define IMGINPUTFILECONN_H
#include "inputconnectorstrategy.h"
#include <opencv2/opencv.hpp>
#ifdef USE_CUDA_CV
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/cudaarithm.hpp>
#endif
#if CV_VERSION_MAJOR >= 3
#define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR
#define CV_LOAD_IMAGE_GRAYSCALE cv::IMREAD_GRAYSCALE
#define CV_LOAD_IMAGE_UNCHANGED cv::IMREAD_UNCHANGED
#define CV_BGR2RGB cv::COLOR_BGR2RGB
#define CV_BGR2GRAY cv::COLOR_BGR2GRAY
#define CV_GRAY2RGB cv::COLOR_GRAY2RGB
#define CV_YCrCb2RGB cv::COLOR_YCrCb2RGB
#define CV_YCrCb2BGR cv::COLOR_YCrCb2BGR
#define CV_BGR2YCrCb cv::COLOR_BGR2YCrCb
#define CV_INTER_CUBIC cv::INTER_CUBIC
#endif
#include "ext/base64/base64.h"
#include "utils/apitools.h"
#include <random>
#include "dto/input_connector.hpp"
namespace dd
{
class DDImg
{
public:
DDImg()
{
}
~DDImg()
{
}
// base64 detection
/// Returns true iff c belongs to the base64 alphabet: A-Z, a-z, 0-9,
/// '+', '/', or the '=' padding character.
/// (Idiom fix: return the predicate directly instead of if/else with
/// boolean literals.)
bool is_within_base64_range(char c) const
{
  return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
         || (c >= '0' && c <= '9') || c == '+' || c == '/' || c == '=';
}
/// Heuristic base64 detection: the string must have a length that is a
/// multiple of four and contain only base64-alphabet characters.
bool possibly_base64(const std::string &s) const
{
  if (!is_multiple_four(s))
    return false;
  for (const char ch : s)
    {
      if (!is_within_base64_range(ch))
        return false;
    }
  return true;
}
/// Returns true iff the string length is a multiple of four (a necessary
/// property of base64-encoded data).
/// (Idiom fix: return the predicate directly instead of if/else with
/// boolean literals.)
bool is_multiple_four(const std::string &s) const
{
  return s.length() % 4 == 0;
}
/** Apply the connector's CPU preprocessing pipeline to one image:
 *  resize/scale, optional center-crop, optional histogram equalization,
 *  and optional BGR->RGB conversion.
 *  @param src input image
 *  @param dst output, preprocessed image
 *  @param img_name name used in error messages only
 *  @throw InputConnectorBadParamException if resizing or cropping fails
 */
void prepare(const cv::Mat &src, cv::Mat &dst,
             const std::string &img_name) const
{
  try
    {
      if (_scaled)
        scale(src, dst);
      else if (_width == 0 || _height == 0)
        {
          if (_width == 0 && _height == 0)
            {
              // Do nothing and keep native resolution. May cause issues if
              // batched images are different resolutions
              dst = src;
            }
          else
            {
              // Resize so that the larger dimension is set to whichever
              // (width or height) is non-zero, maintaining aspect ratio
              // XXX - This may cause issues if batch images are different
              // resolutions
              size_t currMaxDim = std::max(src.rows, src.cols);
              double scale = static_cast<double>(std::max(_width, _height))
                             / static_cast<double>(currMaxDim);
              cv::resize(src, dst, cv::Size(), scale, scale,
                         select_cv_interp());
            }
        }
      else
        {
          // Resize normally to the specified width and height
          cv::resize(src, dst, cv::Size(_width, _height), 0, 0,
                     select_cv_interp());
        }
    }
  catch (...)
    {
      throw InputConnectorBadParamException("failed resizing image "
                                            + img_name);
    }
  // cropping: centered crop computed from the resize target
  // NOTE(review): assumes _crop_width <= _width and _crop_height <= _height,
  // otherwise the borders go negative — TODO confirm validation upstream.
  if (_crop_width != 0 && _crop_height != 0)
    {
      int widthBorder = (_width - _crop_width) / 2;
      int heightBorder = (_height - _crop_height) / 2;
      try
        {
          dst = dst(cv::Rect(widthBorder, heightBorder, _crop_width,
                             _crop_height));
        }
      catch (...)
        {
          throw InputConnectorBadParamException("failed cropping image "
                                                + img_name);
        }
    }
  // color adjustments
  if (_histogram_equalization)
    {
      if (_bw)
        {
          // single-channel image: equalize directly
          cv::equalizeHist(dst, dst);
          if (_rgb)
            cv::cvtColor(dst, dst, CV_GRAY2RGB);
        }
      else
        {
          // We don't apply equalizeHist on each BGR channels to keep
          // the color balance of the image. equalizeHist(V) of HSV can
          // works too, the result is almost the same
          cv::cvtColor(dst, dst, CV_BGR2YCrCb);
          std::vector<cv::Mat> vec_channels;
          cv::split(dst, vec_channels);
          // equalize only the luma (Y) channel
          cv::equalizeHist(vec_channels[0], vec_channels[0]);
          cv::merge(vec_channels, dst);
          if (_rgb)
            cv::cvtColor(dst, dst, CV_YCrCb2RGB);
          else
            cv::cvtColor(dst, dst, CV_YCrCb2BGR);
        }
    }
  else if (_rgb)
    {
      if (_bw)
        cv::cvtColor(dst, dst, CV_GRAY2RGB);
      else
        cv::cvtColor(dst, dst, CV_BGR2RGB);
    }
}
#ifdef USE_CUDA_CV
/** GPU counterpart of prepare(): applies the same preprocessing pipeline
 *  (resize/scale, optional center-crop, optional histogram equalization,
 *  optional BGR->RGB conversion) to a cv::cuda::GpuMat, queuing every
 *  OpenCV CUDA call on *_cuda_stream.
 *  @param src input image already resident on the GPU
 *  @param dst output, preprocessed GPU image
 *  @param img_name name used in error messages only
 *  @throw InputConnectorBadParamException if resizing or cropping fails
 */
void prepare_cuda(const cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst,
                  const std::string &img_name) const
{
  try
    {
      if (_scaled)
        scale_cuda(src, dst);
      else if (_width == 0 || _height == 0)
        {
          if (_width == 0 && _height == 0)
            {
              // Do nothing and keep native resolution. May cause issues if
              // batched images are different resolutions
              dst = src;
            }
          else
            {
              // Resize so that the larger dimension is set to whichever
              // (width or height) is non-zero, maintaining aspect ratio
              // XXX - This may cause issues if batch images are different
              // resolutions
              size_t currMaxDim = std::max(src.rows, src.cols);
              double scale = static_cast<double>(std::max(_width, _height))
                             / static_cast<double>(currMaxDim);
              cv::cuda::resize(src, dst, cv::Size(), scale, scale,
                               select_cv_interp(), *_cuda_stream);
            }
        }
      else
        {
          // Resize normally to the specified width and height
          cv::cuda::resize(src, dst, cv::Size(_width, _height), 0, 0,
                           select_cv_interp(), *_cuda_stream);
        }
    }
  catch (...)
    {
      throw InputConnectorBadParamException("failed resizing image "
                                            + img_name);
    }
  // cropping: centered crop computed from the resize target
  if (_crop_width != 0 && _crop_height != 0)
    {
      int widthBorder = (_width - _crop_width) / 2;
      int heightBorder = (_height - _crop_height) / 2;
      try
        {
          // TODO cuda crop with stream
          dst = dst(cv::Rect(widthBorder, heightBorder, _crop_width,
                             _crop_height));
        }
      catch (...)
        {
          throw InputConnectorBadParamException("failed cropping image "
                                                + img_name);
        }
    }
  if (_histogram_equalization)
    {
      if (_bw)
        {
          // single-channel image: equalize directly
          cv::cuda::equalizeHist(dst, dst, *_cuda_stream);
          if (_rgb)
            cv::cuda::cvtColor(dst, dst, CV_GRAY2RGB, 0, *_cuda_stream);
        }
      else
        {
          // We don't apply equalizeHist on each BGR channels to keep
          // the color balance of the image. equalizeHist(V) of HSV can
          // works too, the result is almost the same
          cv::cuda::cvtColor(dst, dst, CV_BGR2YCrCb, 0, *_cuda_stream);
          std::vector<cv::cuda::GpuMat> vec_channels;
          cv::cuda::split(dst, vec_channels, *_cuda_stream);
          // equalize only the luma (Y) channel
          cv::cuda::equalizeHist(vec_channels[0], vec_channels[0],
                                 *_cuda_stream);
          cv::cuda::merge(vec_channels, dst, *_cuda_stream);
          if (_rgb)
            cv::cuda::cvtColor(dst, dst, CV_YCrCb2RGB, 0, *_cuda_stream);
          else
            cv::cuda::cvtColor(dst, dst, CV_YCrCb2BGR, 0, *_cuda_stream);
        }
    }
  else if (_rgb)
    {
      if (_bw)
        cv::cuda::cvtColor(dst, dst, CV_GRAY2RGB, 0, *_cuda_stream);
      else
        cv::cuda::cvtColor(dst, dst, CV_BGR2RGB, 0, *_cuda_stream);
    }
}
#endif
/** Aspect-preserving rescale: picks the factor bounded by both
 *  _scale_min over the short side and _scale_max over the long side. */
void scale(const cv::Mat &src, cv::Mat &dst) const
{
  int long_side = std::max(src.rows, src.cols);
  int short_side = std::min(src.rows, src.cols);
  float by_long = static_cast<float>(_scale_max) / long_side;
  float by_short = static_cast<float>(_scale_min) / short_side;
  float coef = std::min(by_long, by_short);
  cv::resize(src, dst, cv::Size(), coef, coef, select_cv_interp());
}
#ifdef USE_CUDA_CV
/** CUDA variant of scale(): same bounded aspect-preserving factor,
 *  resized asynchronously on *_cuda_stream. */
void scale_cuda(const cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst) const
{
  int long_side = std::max(src.rows, src.cols);
  int short_side = std::min(src.rows, src.cols);
  float by_long = static_cast<float>(_scale_max) / long_side;
  float by_short = static_cast<float>(_scale_min) / short_side;
  float coef = std::min(by_long, by_short);
  cv::cuda::resize(src, dst, cv::Size(), coef, coef, select_cv_interp(),
                   *_cuda_stream);
}
#endif
/// Apply preprocessing to image and add it to the list of images
/// img_name: name of the image as displayed in error messages
/// Returns 0 on success, -1 when the input image is empty.
int add_image(const cv::Mat &img, const std::string &img_name)
{
  if (img.empty())
    {
      _logger->error("empty image {}", img_name);
      return -1;
    }
  // record the original (pre-resize) dimensions
  _imgs_size.push_back(std::pair<int, int>(img.rows, img.cols));
#ifdef USE_CUDA_CV
  if (_cuda)
    {
      // GPU path: upload once, optionally keep the raw device copy
      cv::cuda::GpuMat d_src;
      d_src.upload(img);
      if (_keep_orig)
        _cuda_orig_imgs.push_back(d_src);
      cv::cuda::GpuMat d_dst;
      prepare_cuda(d_src, d_dst, img_name);
      _cuda_imgs.push_back(std::move(d_dst));
    }
  else
#endif
    {
      // CPU path
      if (_keep_orig)
        _orig_imgs.push_back(img);
      cv::Mat rimg;
      prepare(img, rimg, img_name);
      _imgs.push_back(std::move(rimg));
    }
  return 0;
}
#ifdef USE_CUDA_CV
/// add_image but directly from a cv::cuda::GpuMat (no host upload)
int add_image_cuda(const cv::cuda::GpuMat &d_src,
                   const std::string &img_name)
{
  // remember the input dimensions, optionally keep the untouched image
  _imgs_size.emplace_back(d_src.rows, d_src.cols);
  if (_keep_orig)
    _cuda_orig_imgs.push_back(d_src);
  cv::cuda::GpuMat prepared;
  prepare_cuda(d_src, prepared, img_name);
  _cuda_imgs.push_back(std::move(prepared));
  return 0;
}
#endif
// decode image
void decode(const std::string &str)
{
std::vector<unsigned char> vdat(str.begin(), str.end());
cv::Mat img = cv::Mat(cv::imdecode(
cv::Mat(vdat, false),
_unchanged_data
? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)));
add_image(img, "base64 image");
}
// deserialize image, independent of format
// The whole stream content is the encoded image; hand it to decode().
// Fix: replaces the manual seekg/new[]/read/delete[] sequence, which
// leaked the buffer if read() or string construction threw, and which
// produced a huge allocation when tellg() failed (-1). stringstream::str()
// already returns the full buffer regardless of the get position.
void deserialize(std::stringstream &input)
{
  decode(input.str());
}
// data acquisition
int read_file(const std::string &fname, int test_id)
{
(void)test_id;
cv::Mat img
= cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
: (_bw ? CV_LOAD_IMAGE_GRAYSCALE
: CV_LOAD_IMAGE_COLOR));
return add_image(img, fname);
}
// db input: nothing is read here, the db path is recorded and later
// handed to the backend (see _db_fname usage in transform()).
int read_db(const std::string &fname)
{
  _db_fname = fname;
  return 0;
}
// read an image from memory: either raw encoded bytes or a base64 string.
// Returns 0 on success, -1 when no image could be decoded.
// Fix: decode()/add_image() push nothing on failure, so the original
// `_imgs.at(0)` threw std::out_of_range instead of returning -1; it also
// always threw in CUDA mode where images land in _cuda_imgs. The unused
// local `timg` was removed.
int read_mem(const std::string &content)
{
  _in_mem = true;
  _b64 = possibly_base64(content);
  if (_b64)
    {
      std::string ccontent;
      Base64::Decode(content, &ccontent);
      std::stringstream sstr;
      sstr << ccontent;
      deserialize(sstr);
    }
  else
    {
      decode(content);
    }
#ifdef USE_CUDA_CV
  if (_cuda)
    return (_cuda_imgs.empty() || _cuda_imgs.at(0).empty()) ? -1 : 0;
#endif
  if (_imgs.empty() || _imgs.at(0).empty())
    return -1;
  return 0;
}
// Read a directory of images.
// When the directory contains subdirectories, each subdirectory is a
// class (label = subdirectory index in iteration order); a flat
// directory yields unlabeled files (label -1).
int read_dir(const std::string &dir, int test_id)
{
  (void)test_id;
  // list directories in dir
  std::unordered_set<std::string> subdirs;
  if (fileops::list_directory(dir, false, true, false, subdirs))
    throw InputConnectorBadParamException(
        "failed reading text subdirectories in data directory " + dir);
  _logger->info("imginputfileconn: list subdirs size={}", subdirs.size());
  // list files and classes
  std::vector<std::pair<std::string, int>> lfiles; // labeled files
  std::unordered_map<int, std::string>
      hcorresp; // correspondence class number / class name
  if (!subdirs.empty())
    {
      // one class per subdirectory
      int cl = 0;
      auto uit = subdirs.begin();
      while (uit != subdirs.end())
        {
          std::unordered_set<std::string> subdir_files;
          if (fileops::list_directory((*uit), true, false, true,
                                      subdir_files))
            throw InputConnectorBadParamException(
                "failed reading image data sub-directory " + (*uit));
          auto fit = subdir_files.begin();
          while (fit != subdir_files.end()) // XXX: re-iterating the file
                                            // is not optimal
            {
              lfiles.push_back(std::pair<std::string, int>((*fit), cl));
              ++fit;
            }
          ++cl;
          ++uit;
        }
    }
  else
    {
      // flat directory: unlabeled files
      std::unordered_set<std::string> test_files;
      fileops::list_directory(dir, true, false, false, test_files);
      auto fit = test_files.begin();
      while (fit != test_files.end())
        {
          lfiles.push_back(
              std::pair<std::string, int>((*fit), -1)); // -1 for no class
          ++fit;
        }
    }
  // read images
  _imgs.reserve(lfiles.size());
  _img_files.reserve(lfiles.size());
  _labels.reserve(lfiles.size());
  for (std::pair<std::string, int> &p : lfiles)
    {
      cv::Mat img = cv::imread(
          p.first, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED
                                   : (_bw ? CV_LOAD_IMAGE_GRAYSCALE
                                          : CV_LOAD_IMAGE_COLOR));
      add_image(img, p.first);
      _img_files.push_back(p.first);
      if (p.second >= 0)
        _labels.push_back(p.second);
      // progress logging every 1000 images
      if (_imgs.size() % 1000 == 0)
        _logger->info("read {} images", _imgs.size());
    }
  return 0;
}
/// Map the configured interpolation name onto the OpenCV constant;
/// any unrecognized value (including the default "cubic") yields cubic.
int select_cv_interp() const
{
  if (_interp == "nearest")
    return cv::INTER_NEAREST;
  if (_interp == "linear")
    return cv::INTER_LINEAR;
  if (_interp == "area")
    return cv::INTER_AREA;
  if (_interp == "lanczos4")
    return cv::INTER_LANCZOS4;
  return cv::INTER_CUBIC; // default
}
// -- data produced by the read_* entry points --
std::vector<cv::Mat> _imgs;          // preprocessed images
std::vector<cv::Mat> _orig_imgs;     // originals, kept when _keep_orig
std::vector<std::string> _img_files; // source file paths (read_dir)
std::vector<std::pair<int, int>> _imgs_size; // original (rows, cols)
// -- preprocessing configuration (filled by copy_parameters_to) --
bool _bw = false;                     // convert to grayscale
bool _rgb = false;                    // convert BGR -> RGB
bool _histogram_equalization = false; // apply histogram equalization
bool _in_mem = false;                 // input came from memory (read_mem)
bool _unchanged_data = false;         // load with IMREAD_UNCHANGED
std::vector<int> _labels;             // per-image labels (read_dir)
int _width = 224;                     // resize target width (0 = keep/aspect)
int _height = 224;                    // resize target height (0 = keep/aspect)
int _crop_width = 0;                  // center-crop width (0 = no crop)
int _crop_height = 0;                 // center-crop height (0 = no crop)
float _scale = 1.0;  // scale factor — not used in the visible methods;
                     // presumably applied by backends, TODO confirm
bool _scaled = false;                 // use scale()/scale_cuda() resizing
int _scale_min = 600;                 // short-side bound used by scale()
int _scale_max = 1000;                // long-side bound used by scale()
bool _keep_orig = false;              // keep originals (for chained ops)
bool _b64 = false;                    // last input was base64 (read_mem)
std::string _interp = "cubic";        // interpolation name, see select_cv_interp()
#ifdef USE_CUDA_CV
bool _cuda = false;                            // preprocess on GPU
std::vector<cv::cuda::GpuMat> _cuda_imgs;      // preprocessed GPU images
std::vector<cv::cuda::GpuMat> _cuda_orig_imgs; // GPU originals
cv::cuda::Stream *_cuda_stream = nullptr;      // stream for async GPU ops
#endif
std::string _db_fname;                // db path when input is a database
std::shared_ptr<spdlog::logger> _logger;
};
class ImgInputFileConn : public InputConnectorStrategy
{
public:
ImgInputFileConn() : InputConnectorStrategy()
{
}
ImgInputFileConn(const ImgInputFileConn &i)
: InputConnectorStrategy(i), _width(i._width), _height(i._height),
_crop_width(i._crop_width), _crop_height(i._crop_height), _bw(i._bw),
_rgb(i._rgb), _unchanged_data(i._unchanged_data),
_test_split(i._test_split), _mean(i._mean),
_has_mean_scalar(i._has_mean_scalar), _scale(i._scale),
_scaled(i._scaled), _scale_min(i._scale_min),
_scale_max(i._scale_max), _keep_orig(i._keep_orig),
_interp(i._interp)
#ifdef USE_CUDA_CV
,
_cuda(i._cuda)
#endif
{
}
~ImgInputFileConn()
{
}
// One-time initialization from the service creation parameters.
void init(const APIData &ad)
{
  fillup_parameters(ad);
}
// APIData entry point: convert to the DTO form and delegate.
void fillup_parameters(const APIData &ad)
{
  fillup_parameters(ad.createSharedDTO<dd::DTO::InputConnector>());
}
/** Read optional input-connector parameters from the DTO; only provided
 *  (non-null / non-zero) fields override the current values.
 *  Fix: the crop parameters were assigned to _width/_height instead of
 *  _crop_width/_crop_height, so center-cropping never triggered (the
 *  _crop_* members stayed 0) and the resize target was clobbered. */
void fillup_parameters(oatpp::Object<DTO::InputConnector> params)
{
  // optional parameters.
  if (params->width)
    _width = params->width;
  if (params->height)
    _height = params->height;
  if (params->crop_width)
    {
      if (params->crop_width > _width)
        {
          _logger->error("Crop width must be less than or equal to width");
          throw InputConnectorBadParamException(
              "Crop width must be less than or equal to width");
        }
      _crop_width = params->crop_width;
    }
  if (params->crop_height)
    {
      if (params->crop_height > _height)
        {
          _logger->error(
              "Crop height must be less than or equal to height");
          throw InputConnectorBadParamException(
              "Crop height must be less than or equal to height");
        }
      _crop_height = params->crop_height;
    }
  if (params->bw != nullptr)
    _bw = params->bw;
  if (params->rgb != nullptr)
    _rgb = params->rgb;
  if (params->histogram_equalization != nullptr)
    _histogram_equalization = params->histogram_equalization;
  if (params->unchanged_data != nullptr)
    _unchanged_data = params->unchanged_data;
  if (params->shuffle != nullptr)
    _shuffle = params->shuffle;
  if (params->seed)
    _seed = params->seed;
  if (params->test_split)
    _test_split = params->test_split;
  if (params->mean)
    {
      // NOTE(sileht): if we have two much of this we can create
      // an oat++ type that directly handle std::vector<float> instead
      // of using the oatpp::Vector<oatpp::Float32>
      _mean = std::vector<float>();
      for (auto &v : *params->mean)
        _mean.push_back(v);
      _has_mean_scalar = true;
    }
  if (params->std)
    {
      _std = std::vector<float>();
      for (auto &v : *params->std)
        _std.push_back(v);
    }
  // Variable size
  _scaled |= params->scaled;
  if (params->scale)
    try
      {
        _scale = params->scale.retrieve<oatpp::Float64>();
      }
    catch (const std::runtime_error &error)
      {
        std::string msg
            = "could not read double value for scale input parameter";
        _logger->error(msg);
        throw InputConnectorBadParamException(msg);
      }
  if (params->scale_min)
    {
      _scaled = true;
      _scale_min = params->scale_min;
    }
  if (params->scale_max)
    {
      _scaled = true;
      _scale_max = params->scale_max;
    }
  // whether to keep original image (for chained ops, e.g. cropping)
  _keep_orig |= params->keep_orig;
  // image interpolation method
  if (params->interp)
    _interp = params->interp->std_str();
  // timeout
  this->set_timeout(params);
#ifdef USE_CUDA_CV
  // image resizing on GPU
  _cuda |= params->cuda;
#endif
}
// Propagate the connector's preprocessing configuration to a DDImg
// (the per-request data element type used by transform()).
void copy_parameters_to(DDImg &dimg) const
{
  dimg._bw = _bw;
  dimg._rgb = _rgb;
  dimg._histogram_equalization = _histogram_equalization;
  dimg._unchanged_data = _unchanged_data;
  dimg._width = _width;
  dimg._height = _height;
  dimg._crop_width = _crop_width;
  dimg._crop_height = _crop_height;
  // NOTE(review): DDImg::_scale is float while _scale is double —
  // silent narrowing here
  dimg._scale = _scale;
  dimg._scaled = _scaled;
  dimg._scale_min = _scale_min;
  dimg._scale_max = _scale_max;
  dimg._keep_orig = _keep_orig;
  dimg._interp = _interp;
#ifdef USE_CUDA_CV
  dimg._cuda = _cuda;
  dimg._cuda_stream = _cuda_stream;
#endif
  dimg._logger = _logger;
}
// Number of input features per image: area for single-channel inputs
// (bw / unchanged), area * 3 otherwise; crop dimensions win when set.
// XXX: the 'unchanged' case is only valid for single channels.
int feature_size() const
{
  const bool cropped = (_crop_width != 0 && _crop_height != 0);
  const int area
      = cropped ? _crop_width * _crop_height : _width * _height;
  return (_bw || _unchanged_data) ? area : area * 3;
}
// Number of loaded (training/prediction) images.
int batch_size() const
{
  return static_cast<int>(_images.size());
}
// Number of split-off test images.
int test_batch_size() const
{
  return static_cast<int>(_test_images.size());
}
// Collect raw cv::Mat (and CUDA) images passed directly in the predict
// DTO; falls back to the generic input strategy when none are present.
void get_data(oatpp::Object<DTO::ServicePredict> pred_in)
{
  if (!pred_in->_data_raw_img.empty()
#ifdef USE_CUDA_CV
      || !pred_in->_data_raw_img_cuda.empty()
#endif
  )
    {
      _ids = pred_in->_ids;
      _meta_uris = pred_in->_meta_uris;
      _index_uris = pred_in->_index_uris;
      std::vector<std::string> uris;
      DataEl<DDImg> dimg(this->_input_timeout);
      copy_parameters_to(dimg._ctype);
      int i = 0;
      // preprocess
#ifdef USE_CUDA_CV
      for (auto cuda_img : pred_in->_data_raw_img_cuda)
        {
          // use caller-provided ids when available, else index-based ids
          if (!_ids.empty())
            uris.push_back(_ids.at(i));
          else
            {
              _ids.push_back(std::to_string(i));
              uris.push_back(_ids.back());
            }
          dimg._ctype.add_image_cuda(cuda_img, _ids.back());
          ++i;
        }
#endif
      for (auto img : pred_in->_data_raw_img)
        {
          if (!_ids.empty())
            uris.push_back(_ids.at(i));
          else
            {
              _ids.push_back(std::to_string(i));
              uris.push_back(_ids.back());
            }
          dimg._ctype.add_image(img, _ids.back());
          ++i;
        }
      // add preprocessed images
#ifdef USE_CUDA_CV
      if (_cuda)
        {
          if (_keep_orig)
            _cuda_orig_images.insert(_cuda_orig_images.end(),
                                     dimg._ctype._cuda_orig_imgs.begin(),
                                     dimg._ctype._cuda_orig_imgs.end());
          _cuda_images.insert(_cuda_images.end(),
                              dimg._ctype._cuda_imgs.begin(),
                              dimg._ctype._cuda_imgs.end());
        }
      else
#endif
        {
          if (_keep_orig)
            _orig_images = dimg._ctype._orig_imgs;
          _images = dimg._ctype._imgs;
        }
      _images_size.insert(_images_size.end(),
                          dimg._ctype._imgs_size.begin(),
                          dimg._ctype._imgs_size.end());
      if (!uris.empty())
        _uris = uris;
    }
  else
    InputConnectorStrategy::get_data(pred_in);
}
// Collect raw cv::Mat images from the APIData form of the request.
// Unlike the DTO path, images are resized inline here rather than
// through DDImg's prepare pipeline.
void get_data(const APIData &ad)
{
  // check for raw cv::Mat
  if (ad.has("data_raw_img"))
    {
      if (ad.has("ids"))
        _ids = ad.get("ids").get<std::vector<std::string>>();
      if (ad.has("meta_uris"))
        _meta_uris = ad.get("meta_uris").get<std::vector<std::string>>();
      if (ad.has("index_uris"))
        _index_uris = ad.get("index_uris").get<std::vector<std::string>>();
      _images = ad.get("data_raw_img").get<std::vector<cv::Mat>>();
      std::vector<cv::Mat> rimgs;
      std::vector<std::string> uris;
      int i = 0;
      for (auto img : _images)
        {
          cv::Mat rimg;
          // NOTE(review): uses cv::resize's default interpolation, not
          // select_cv_interp() — confirm whether this is intentional
          resize(img, rimg, cv::Size(_width, _height), 0, 0);
          if (_bw && rimg.channels() > 1)
            {
              cv::Mat bwimg;
              cv::cvtColor(rimg, bwimg, CV_BGR2GRAY);
              rimg = bwimg;
            }
          _images_size.push_back(std::pair<int, int>(img.rows, img.cols));
          if (_keep_orig)
            _orig_images.push_back(std::move(img));
          // caller-provided ids when available, else index-based ids
          if (!_ids.empty())
            uris.push_back(_ids.at(i));
          else
            {
              _ids.push_back(std::to_string(i));
              uris.push_back(_ids.back());
            }
          rimgs.push_back(std::move(rimg));
          ++i;
        }
      _images = rimgs;
      if (!uris.empty())
        _uris = uris;
    }
  else
    InputConnectorStrategy::get_data(ad);
}
// APIData entry point: apply per-request parameter overrides, load the
// data, then run the shared transform logic.
void transform(const APIData &ad)
{
  // hotplug of parameters, overriding the defaults
  if (ad.has("parameters"))
    {
      APIData ad_param = ad.getobj("parameters");
      if (ad_param.has("input"))
        fillup_parameters(ad_param.getobj("input"));
    }
  get_data(ad);
  transform(nullptr);
}
// Main transform: reads every URI (file, dir, base64, db) in parallel,
// preprocesses the images and fills the image lists, then optionally
// shuffles and splits off a test set.
void transform(oatpp::Object<DTO::ServicePredict> input_dto)
{
  if (input_dto != nullptr) // [temporary] == nullptr if called from
                            // transform(APIData)
    {
      fillup_parameters(input_dto->parameters->input);
      get_data(input_dto);
    }
  // raw images may already have been provided by get_data()
  if (!_images.empty() // got ready raw images
#ifdef USE_CUDA_CV
      || !_cuda_images.empty() // got ready cuda images
#endif
  )
    {
      return;
    }
  int catch_read = 0;
  std::string catch_msg;
  std::vector<std::string> uris;
  std::vector<std::string> meta_uris;
  std::vector<std::string> index_uris;
  std::vector<std::string> failed_uris;
  // read URIs in parallel; shared containers are only mutated inside
  // the critical sections below
#pragma omp parallel for
  for (size_t i = 0; i < _uris.size(); i++)
    {
      bool no_img = false;
      std::string u = _uris.at(i);
      DataEl<DDImg> dimg(this->_input_timeout);
      copy_parameters_to(dimg._ctype);
      try
        {
          if (dimg.read_element(u, this->_logger))
            {
              _logger->error("no data for image {}", u);
              no_img = true;
            }
          if (!dimg._ctype._db_fname.empty())
            _db_fname = dimg._ctype._db_fname;
        }
      catch (std::exception &e)
        {
          // record the failure and keep going with the other URIs
#pragma omp critical
          {
            ++catch_read;
            catch_msg = e.what();
            failed_uris.push_back(u);
            no_img = true;
          }
        }
      if (no_img)
        continue;
      if (!_db_fname.empty())
        continue;
      // merge this element's results into the shared lists
#pragma omp critical
      {
#ifdef USE_CUDA_CV
        if (_cuda)
          {
            _cuda_images.insert(
                _cuda_images.end(),
                std::make_move_iterator(dimg._ctype._cuda_imgs.begin()),
                std::make_move_iterator(dimg._ctype._cuda_imgs.end()));
            _cuda_orig_images.insert(
                _cuda_orig_images.end(),
                std::make_move_iterator(
                    dimg._ctype._cuda_orig_imgs.begin()),
                std::make_move_iterator(
                    dimg._ctype._cuda_orig_imgs.end()));
          }
        else
#endif
          {
            _images.insert(
                _images.end(),
                std::make_move_iterator(dimg._ctype._imgs.begin()),
                std::make_move_iterator(dimg._ctype._imgs.end()));
            if (_keep_orig)
              _orig_images.insert(
                  _orig_images.end(),
                  std::make_move_iterator(dimg._ctype._orig_imgs.begin()),
                  std::make_move_iterator(dimg._ctype._orig_imgs.end()));
          }
        _images_size.insert(
            _images_size.end(),
            std::make_move_iterator(dimg._ctype._imgs_size.begin()),
            std::make_move_iterator(dimg._ctype._imgs_size.end()));
        if (!dimg._ctype._labels.empty())
          _test_labels.insert(
              _test_labels.end(),
              std::make_move_iterator(dimg._ctype._labels.begin()),
              std::make_move_iterator(dimg._ctype._labels.end()));
        // select the uri(s) to report for this element
        if (!_ids.empty())
          uris.push_back(_ids.at(i));
        else if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1)
          uris.push_back(u);
        else if (!dimg._ctype._img_files.empty())
          uris.insert(
              uris.end(),
              std::make_move_iterator(dimg._ctype._img_files.begin()),
              std::make_move_iterator(dimg._ctype._img_files.end()));
        else
          uris.push_back(std::to_string(i));
        if (!_meta_uris.empty())
          meta_uris.push_back(_meta_uris.at(i));
        if (!_index_uris.empty())
          index_uris.push_back(_index_uris.at(i));
      }
    }
  // any recorded failure aborts the whole transform
  if (catch_read)
    {
      for (auto s : failed_uris)
        _logger->error("failed reading image {}", s);
      throw InputConnectorBadParamException(catch_msg);
    }
  _uris = uris;
  _ids = _uris; // since uris may be in different order than before
                // transform
  _meta_uris = meta_uris;
  _index_uris = index_uris;
  if (!_db_fname.empty())
    return; // db filename is passed to backend
  // shuffle before possible split
  if (_shuffle)
    {
      std::mt19937 g;
      if (_seed >= 0)
        g = std::mt19937(_seed);
      else
        {
          std::random_device rd;
          g = std::mt19937(rd());
        }
      std::shuffle(_images.begin(), _images.end(),
                   g); // XXX beware: labels are not shuffled, i.e. let's
                       // not shuffle while testing
    }
  // split as required
  if (_test_split > 0)
    {
      // everything past split_size goes into the test set
      int split_size = std::floor(_images.size() * (1.0 - _test_split));
      auto chit = _images.begin();
      auto dchit = chit;
      int cpos = 0;
      while (chit != _images.end())
        {
          if (cpos == split_size)
            {
              if (dchit == _images.begin())
                dchit = chit;
              _test_images.push_back((*chit));
            }
          else
            ++cpos;
          ++chit;
        }
      _images.erase(dchit, _images.end());
      _logger->info("data split test size={} / remaining data size={}",
                    _test_images.size(), _images.size());
    }
  if (_images.empty()
#ifdef USE_CUDA_CV
      && _cuda_images.empty()
#endif
  )
    throw InputConnectorBadParamException("no image could be found");
}
static std::vector<double>
img_resize_vector(const std::vector<double> &vals, const int height_net,
const int width_net, const int height_dest,
const int width_dest, bool resize_nn)
{
cv::Mat segimg = cv::Mat(height_net, width_net, CV_64FC1);
std::memcpy(segimg.data, vals.data(), vals.size() * sizeof(double));
cv::Mat segimg_res;
if (resize_nn)
cv::resize(segimg, segimg_res, cv::Size(width_dest, height_dest), 0, 0,
cv::INTER_NEAREST);
else
cv::resize(segimg, segimg_res, cv::Size(width_dest, height_dest), 0, 0,
cv::INTER_LINEAR);
return std::vector<double>((double *)segimg_res.data,
(double *)segimg_res.data
+ segimg_res.rows * segimg_res.cols);
}
// data
std::vector<cv::Mat> _images;      /**< preprocessed images. */
std::vector<cv::Mat> _orig_images; /**< stored upon request. */
std::vector<cv::Mat> _test_images; /**< split-off test set. */
std::vector<int> _test_labels;     /**< labels gathered for testing. */
std::vector<std::pair<int, int>> _images_size; /**< original (rows, cols). */
#ifdef USE_CUDA_CV
std::vector<cv::cuda::GpuMat>
    _cuda_images; /**< cuda images for full-GPU processing. */
std::vector<cv::cuda::GpuMat>
    _cuda_orig_images; /**< original images stored on GPU */
#endif
// image parameters
int _width = 224;     /**< resize target width. */
int _height = 224;    /**< resize target height. */
int _crop_width = 0;  /**< center-crop width (0 = disabled). */
int _crop_height = 0; /**< center-crop height (0 = disabled). */
bool _bw = false;  /**< whether to convert to black & white. */
bool _rgb = false; /**< whether to convert to rgb. */
bool _histogram_equalization
    = false; /**< whether to apply histogram equalizer. */
bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */
double _test_split = 0.0; /**< auto-split of the dataset. */
int _seed = -1;           /**< shuffling seed. */
std::vector<float>
    _mean; /**< mean image pixels, to be subtracted from images. */
std::vector<float> _std;       /**< std, to divide image values. */
bool _has_mean_scalar = false; /**< whether scalar is set. */
std::string _db_fname;         /**< db path, when input is a database. */
double _scale = 1.0; /**< scale factor — applied outside this view;
                          TODO confirm semantics. */
bool _scaled = false;  /**< min/max aspect-preserving scaling mode. */
int _scale_min = 600;  /**< short-side bound when _scaled. */
int _scale_max = 1000; /**< long-side bound when _scaled. */
bool _keep_orig = false;       /**< keep originals for chained ops. */
std::string _interp = "cubic"; /**< interpolation method name. */
#ifdef USE_CUDA_CV
bool _cuda = false; /**< preprocess on GPU. */
cv::cuda::Stream *_cuda_stream = &cv::cuda::Stream::Null();
#endif
};
}
#ifdef USE_CAFFE
#include "caffeinputconns.h"
#endif
#ifdef USE_TF
#include "backends/tf/tfinputconns.h"
#endif
#ifdef USE_DLIB
#include "backends/dlib/dlibinputconns.h"
#endif
#ifdef USE_NCNN
#include "backends/ncnn/ncnninputconns.h"
#endif
#ifdef USE_CAFFE2
#include "backends/caffe2/caffe2inputconns.h"
#endif
#ifdef USE_TENSORRT
#include "backends/tensorrt/tensorrtinputconns.h"
#endif
#ifdef USE_TORCH
#include "backends/torch/torchinputconns.h"
#endif
#endif
|
SpatialConvolutionLocal.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionLocal.c"
#else
/* Validate input/gradOutput/weight/bias geometry for
 * SpatialConvolutionLocal. weight is expected in its viewed 3D form:
 * (oH*oW) x nOutputPlane x (nInputPlane*kH*kW) — see view_weight_local.
 * Raises a TH argument error (with the given argument index) on
 * mismatch. gradOutput and bias may be NULL to skip their checks. */
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
	THTensor *input, THTensor *gradOutput,
	THTensor *weight, THTensor *bias,
	int kH, int kW, int dH,
	int dW, int padH, int padW,
	int64_t inputHeight, int64_t inputWidth,
	int64_t outputHeight, int64_t outputWidth) {
  THArgCheck(kW > 0 && kH > 0, 9,
	     "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
	     "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
  /* dimension indices shift by one for batched (4D) input */
  int ndim = input->nDimension;
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;
  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }
  THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input,
		"3D or 4D input tensor expected but got: %s");
  /* recover the plane counts from the viewed weight shape */
  int64_t nInputPlane = weight->size[2] / (kH * kW);
  int64_t nOutputPlane = weight->size[1];
  if (bias != NULL) {
    /* bias is per output location: nOutputPlane x oH x oW */
    THNN_CHECK_DIM_SIZE(bias, 3, 0, nOutputPlane);
    THNN_CHECK_DIM_SIZE(bias, 3, 1, outputHeight);
    THNN_CHECK_DIM_SIZE(bias, 3, 2, outputWidth);
  }
  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}
/* Return a contiguous 3D view of the local-convolution weight:
 *   (oH*oW) x nOutputPlane x (nInputPlane*kH*kW).
 * A 6D weight (oH x oW x nOutputPlane x nInputPlane x kH x kW) is
 * collapsed. The returned tensor is a new reference that the caller
 * must free. */
static THTensor* THNN_(view_weight_local)(THTensor *_weight)
{
  THTensor *weight = THTensor_(newContiguous)(_weight);
  THArgCheck(weight->nDimension == 3 || weight->nDimension == 6, 4,
          "weight tensor should be 3D or 6D - got %dD", weight->nDimension);
  if (weight->nDimension == 6) {
    /* fold (oH, oW) and (nInputPlane, kH, kW) into single dimensions */
    int64_t s1 = weight->size[0] * weight->size[1];
    int64_t s2 = weight->size[2];
    int64_t s3 = weight->size[3] * weight->size[4] * weight->size[5];
    THTensor *old_weight = weight;
    /* -1 strides: newWithStorage3d computes contiguous strides */
    weight = THTensor_(newWithStorage3d)(weight->storage,
					 weight->storageOffset,
					 s1, -1, s2, -1, s3, -1);
    THTensor_(free)(old_weight);
  }
  return weight;
}
/* Forward pass for a single sample.
 * finput receives the unfolded (im2col) input; output is first filled
 * with the bias so the batched matmul below accumulates on top of it. */
static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
     (
      THTensor *input, THTensor *output,
      THTensor *weight, THTensor *bias, THTensor *finput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *output3d, *finput3d;
  /* im2col: finput becomes (kW*kH*nInputPlane) x (oH*oW) */
  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
		       nInputPlane, inputWidth, inputHeight,
		       outputWidth, outputHeight);
  /* start from the per-location bias */
  THTensor_(copy)(output, bias);
  output3d = THTensor_(newWithStorage3d)
    (output->storage, output->storageOffset,
     outputHeight * outputWidth, 1,
     nOutputPlane, outputHeight * outputWidth,
     1, nOutputPlane * outputHeight * outputWidth);
  finput3d = THTensor_(newWithStorage3d)
    (finput->storage, finput->storageOffset,
     outputHeight * outputWidth, 1,
     kW * kH * nInputPlane, outputHeight * outputWidth,
     1, kW * kH * nInputPlane * outputHeight * outputWidth);
  // weight:    oH*oW x nOutputPlane x nInputPlane*kH*kW
  // finput3d:  oH*oW x nInputPlane*kH*kW x 1
  THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
  // output3d:  oH*oW x nOutputPlane x 1
  THTensor_(free)(output3d);
  THTensor_(free)(finput3d);
}
/* Forward pass of SpatialConvolutionLocal (untied, per-location weights).
 * Accepts 3D (single sample) or 4D (batch) input; batch samples are
 * processed in parallel with OpenMP since each writes disjoint slices. */
void THNN_(SpatialConvolutionLocal_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THTensor *weight,
    THTensor *bias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  /* local references (viewed weight / contiguous input); freed at the end */
  weight = THNN_(view_weight_local)(weight);
  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);
  input = THTensor_(newContiguous)(input);
  int64_t nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
  int64_t nOutputPlane = THTensor_(size)(weight, 1);
  if(input->nDimension == 3)
  {
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
    THNN_(SpatialConvolutionLocal_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;
    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);
    /* per-sample slices are disjoint, so the loop parallelizes safely */
#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
      THNN_(SpatialConvolutionLocal_updateOutput_frame)
	(input_t, output_t, weight, bias, finput_t,
	 kW, kH, dW, dH, padW, padH,
	 nInputPlane, inputWidth, inputHeight,
	 nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(weight);
}
/* Backward-wrt-input for one sample: per output location,
 * fgradInput = weight^T * gradOutput, then col2im (unfolded_acc)
 * accumulates the unfolded gradient back into gradInput.
 * weight here is the transposed view:
 *   oH*oW x nInputPlane*kH*kW x nOutputPlane. */
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
     (THTensor *gradInput, THTensor *gradOutput,
      THTensor *weight, THTensor *fgradInput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *fgradInput3d;
  gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             nOutputPlane, outputHeight*outputWidth,
                                             1, nOutputPlane*outputHeight*outputWidth);
  fgradInput3d = THTensor_(newWithStorage3d)(fgradInput->storage, fgradInput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             kW*kH*nInputPlane, outputHeight*outputWidth,
                                             1, kW*kH*nInputPlane*outputHeight*outputWidth);
  // weight:        oH*oW x nInputPlane*kH*kW x nOutputPlane
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  /* beta = 0.0: fgradInput is overwritten, not accumulated into */
  THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
  // fgradInput3d:  oH*oW x nInputPlane*kH*kW x 1
  THTensor_(free)(gradOutput3d);
  THTensor_(free)(fgradInput3d);
  /* gradInput is zeroed first because unfolded_acc accumulates */
  THTensor_(zero)(gradInput);
  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
                      nInputPlane, inputWidth, inputHeight,
                      outputWidth, outputHeight);
}
/* Backward-wrt-input pass. Builds the transposed weight view once and
 * shares it (read-only) across the parallel batch loop. */
void THNN_(SpatialConvolutionLocal_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THTensor *weight,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  weight = THNN_(view_weight_local)(weight);
  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  int64_t nInputPlane = THTensor_(size)(weight,2)/(kW*kH);
  int64_t nOutputPlane = THTensor_(size)(weight,1);
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  /* tweight: oH*oW x nInputPlane*kH*kW x nOutputPlane (transposed view) */
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 1, 2);
  if(input->nDimension == 3)
  {
    THNN_(SpatialConvolutionLocal_updateGradInput_frame)
      (gradInput, gradOutput, tweight,
       fgradInput, kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;
    /* per-sample slices are disjoint; tweight is shared read-only */
#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
      THNN_(SpatialConvolutionLocal_updateGradInput_frame)
	(gradInput_t, gradOutput_t, tweight, fgradInput_t,
	 kW, kH, dW, dH, padW, padH,
	 nInputPlane, inputWidth, inputHeight,
	 nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }
  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}
/* One sample's parameter-gradient accumulation:
 *   gradWeight += scale * gradOutput (outer) finput, per output location
 *   gradBias   += scale * gradOutput
 * finput3d is viewed as oH*oW x 1 x kW*kH*nInputPlane so that baddbmm
 * forms the per-location outer products. */
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
     (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
      THTensor *finput, real scale,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *finput3d;
  gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                             outputHeight*outputWidth, 1,
                                             nOutputPlane, outputHeight*outputWidth,
                                             1, nOutputPlane*outputHeight*outputWidth);
  finput3d = THTensor_(newWithStorage3d)(finput->storage, finput->storageOffset,
                                         outputHeight*outputWidth, 1,
                                         1, kW*kH*nInputPlane*outputHeight*outputWidth,
                                         kW*kH*nInputPlane, outputHeight*outputWidth);
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  // finput3d:      oH*oW x 1 x kW*kH*nInputPlane
  THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
  // gradWeight:    oH*oW x nOutputPlane x kW*kH*nInputPlane
  THTensor_(cadd)(gradBias, gradBias, scale, gradOutput);
  THTensor_(free)(gradOutput3d);
  THTensor_(free)(finput3d);
}
/* Accumulate weight/bias gradients, scaled by scale_.
 * Unlike the other passes, the batch loop is intentionally serial:
 * every sample accumulates into the same gradWeight/gradBias tensors. */
void THNN_(SpatialConvolutionLocal_accGradParameters)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradWeight,
    THTensor *gradBias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight,
    accreal scale_)
{
  THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  gradWeight = THNN_(view_weight_local)(gradWeight);
  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);
  int64_t nInputPlane = THTensor_(size)(gradWeight,2)/(kW*kH);
  int64_t nOutputPlane = THTensor_(size)(gradWeight,1);
  if(input->nDimension == 3)
  {
    THNN_(SpatialConvolutionLocal_accGradParameters_frame)
      (gradOutput, gradWeight, gradBias, finput, scale,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size[0];
    int64_t t;
    /* serial: all samples accumulate into shared gradWeight/gradBias */
    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
      THNN_(SpatialConvolutionLocal_accGradParameters_frame)
	(gradOutput_t, gradWeight, gradBias, finput_t, scale,
	 kW, kH, dW, dH, padW, padH,
	 nInputPlane, inputWidth, inputHeight,
	 nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(gradWeight);
}
#endif
|
matvec_int.c | //matvec.c
//Multiplies a matrix by a vector
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 1000
#define N 1200
// Wall-clock time in seconds, with millisecond resolution (via ftime).
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + (double) now.millitm / 1000.0;
}
// Fill the N x N matrix and length-N vector with pseudo-random values
// in [0, 10]. Consumes rand() row-major, one vector entry per row,
// matching the original call order.
void init(int **matrix, int *vector) {
    int divisor = (int)(RAND_MAX / 10.0);
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++)
            matrix[row][col] = (int)rand() / divisor;
        vector[row] = (int)rand() / divisor;
    }
}
// Elementwise row scaling: dest[i][j] = matrix[i][j] * vector[j].
// (Despite the name, nothing is summed; these are the products a
// matrix-vector multiply would reduce per row.)
// Fix: removed the dead accumulator 's', which was assigned but never
// read.
void sum(int **matrix, int *vector, int **dest) {
    for (int i = 0; i < N; i++) {
#pragma omp simd
        for (int j = 0; j < N; j++) {
            dest[i][j] = matrix[i][j] * vector[j];
        }
    }
}
// Debug functions
// Reference (non-SIMD) version of sum(), used for correctness checking.
void serial(int **matrix, int *vector, int **dest) {
    for (int row = 0; row < N; row++)
        for (int col = 0; col < N; col++)
            dest[row][col] = matrix[row][col] * vector[col];
}
// Print the top-left 8x8 corner of a matrix, one bracketed row per
// line, followed by a blank line.
void print_matrix(int **matrix) {
    for (int row = 0; row < 8; row++) {
        printf("[");
        for (int col = 0; col < 8; col++)
            printf("%d ", matrix[row][col]);
        puts("]");
    }
    puts("");
}
// Print the first 8 entries of a vector on one bracketed line.
void print_vector(int *vector) {
    printf("[");
    for (int idx = 0; idx < 8; idx++)
        printf("%d ", vector[idx]);
    puts("]");
}
// Compare two N x N matrices; returns 0 iff they are identical.
// Fix: accumulate absolute differences — the original summed signed
// differences, so errors of opposite sign could cancel and report 0
// for matrices that actually differ.
int check(int **A, int **B) {
    int difference = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            int d = A[i][j] - B[i][j];
            difference += d < 0 ? -d : d;
        }
    }
    return difference;
}
/*
 * Driver: allocate the matrices, time N_RUNS of the SIMD and serial
 * element-wise products, print samples and a GFLOPS summary, and release
 * all memory.  Fixes over the original: malloc results are checked, and
 * the per-row allocations are freed (the original freed only the
 * row-pointer arrays, leaking every row).
 */
int main(int argc, char **argv) {
    /* Row-pointer arrays and the input vector. */
    int **dest_matrix = malloc(sizeof(int*)*N);
    int **serial_matrix = malloc(sizeof(int*)*N);
    int **matrix = malloc(sizeof(int*)*N);
    int *vector = malloc(sizeof(int)*N);
    if (!dest_matrix || !serial_matrix || !matrix || !vector) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }
    for (int i = 0; i<N; i++) {
        dest_matrix[i] = malloc(sizeof(int)*N);
        serial_matrix[i] = malloc(sizeof(int)*N);
        matrix[i] = malloc(sizeof(int)*N);
        if (!dest_matrix[i] || !serial_matrix[i] || !matrix[i]) {
            fprintf(stderr, "allocation failure\n");
            return 1;
        }
    }
    srand(time(NULL));
    init(matrix, vector);
    /* Time the SIMD variant. */
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        sum(matrix, vector, dest_matrix);
    double t = (read_timer() - start);
    /* Time the serial reference. */
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        serial(matrix, vector, serial_matrix);
    double t_serial = (read_timer() - start_serial);
    print_matrix(matrix);
    print_vector(vector);
    puts("=\n");
    print_matrix(dest_matrix);
    puts("---------------------------------");
    print_matrix(serial_matrix);
    /* 2 flops per element per run (classic matvec operation count). */
    double gflops = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = ((2.0 * N) * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Matrix-vector (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("Matrix-vector (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", check(dest_matrix,serial_matrix));
    /* Free the rows before the row-pointer arrays (original leaked these). */
    for (int i = 0; i<N; i++) {
        free(dest_matrix[i]);
        free(serial_matrix[i]);
        free(matrix[i]);
    }
    free(dest_matrix);
    free(serial_matrix);
    free(matrix);
    free(vector);
    return 0;
}
|
single-modificado-master.c | #include <stdio.h>
#include <omp.h>
/*
 * Demonstrates the OpenMP `single`, `for` and `master` constructs:
 * one thread reads the initialization value `a`, then all threads fill b[]
 * with it inside a work-sharing loop.
 */
int main() {
    int n = 9, i, a, b[n];
    /* Pre-mark every slot so an uninitialized entry would show up as -1. */
    for (i=0; i<n; i++)
        b[i] = -1;
    #pragma omp parallel
    {
        /* Executed by exactly one (unspecified) thread; implicit barrier at end. */
        #pragma omp single
        {
            printf("Dentro de la región parallel:\n");
        }
        /* A second single region reads `a`; its implicit barrier guarantees
         * the value is visible to every thread before the omp for below. */
        #pragma omp single
        {
            printf("Introduce valor de inicialización a:");
            scanf("%d", &a );
            printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
        }
        /* Work-sharing loop; the loop variable i is implicitly privatized. */
        #pragma omp for
        for (i=0; i<n; i++)
            b[i] = a;
    }
    /* NOTE(review): this `master` is outside any parallel region, so it binds
     * to the sequential part and always executes on the initial thread — the
     * pragma has no filtering effect here; confirm that was intended. */
    #pragma omp master
    {
        for (i=0; i<n; i++)
            printf("b[%d] = %d\t",i,b[i]);
        printf("\n");
        printf("Master ejecutada por el thread %d\n", omp_get_thread_num());
    }
    printf("Después de la región parallel:\n");
    printf("Single ejecutada por el thread %d\n", omp_get_thread_num());
    for (i=0; i<n; i++)
        printf("b[%d] = %d\t",i,b[i]);
    printf("\n");
}
|
convolution_3x3_packn_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Pre-transform 3x3 convolution kernels into Winograd F(6,3) 8x8 form and
// repack them into the packn fp16 layout consumed by the dot stage.
static void conv3x3s1_winograd64_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    // packn = fp16 lanes per vector register (vlenb bytes / 2 bytes per fp16)
    const int packn = csrr_vlenb() / 2;

    // winograd63 transform kernel
    // Stage 1: expand every 3x3 kernel to its 8x8 transformed form (fp32).
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // Rows of the Winograd G matrix (U = G * g * G^T).
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (8x3 intermediate)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * G^T  (final 8x8 block)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Stage 2: interleave into packed layout and narrow fp32 -> fp16.
    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    // NOTE(review): assumes inch and outch are multiples of packn — the
    // trailing remainder channels (if any) are not packed here; confirm
    // callers guarantee this.
    kernel_tm_packn.create(inch / packn, 64, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) convolution, packn fp16 storage and arithmetic.
// Pipeline: pad input -> transform 8x8 input tiles -> batched dot against the
// pre-transformed kernel -> inverse output transform -> crop to top_blob size.
static void conv3x3s1_winograd64_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    // packn = fp16 lanes per vector register; vl fixes the vector length once
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2 (each 8x8 tile produces a 6x6 output patch, 2-pixel overlap)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd64_transform_input_packn_fp16sa_rvv(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute: regroup tiles into batches of 8/4/2/1 so the dot loops
        // below read them with sequential loads
        // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    // scalar interleave fallback for the C906 core
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    // 8-way segment store interleaves the 8 tiles lane-wise
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                // leftover single tile: plain copy, no interleave needed
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // dot: for each output channel, multiply-accumulate every transformed
        // tile against the matching transformed kernel row, 8/4/2/1 tiles at
        // a time mirroring the permuted layout above
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd64_transform_output_packn_fp16sa_rvv(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Pre-transform 3x3 kernels into Winograd F(4,3) 6x6 form and repack into the
// packn fp16 layout consumed by the F(4x4,3x3) dot stage.
static void conv3x3s1_winograd42_transform_kernel_packn_fp16sa_rvv(const Mat& kernel, Mat& kernel_tm_packn, int inch, int outch, const Option& opt)
{
    // packn = fp16 lanes per vector register
    const int packn = csrr_vlenb() / 2;

    // winograd42 transform kernel
    // Stage 1: expand every 3x3 kernel to its 6x6 transformed form (fp32).
    Mat kernel_tm(6 * 6, inch, outch);

    // Rows of the Winograd G matrix (U = G * g * G^T).
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (6x3 intermediate)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : kernel_tm0 = tmp * G^T  (final 6x6 block)
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Stage 2: interleave into packed layout and narrow fp32 -> fp16.
    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    // NOTE(review): assumes inch and outch are multiples of packn — remainder
    // channels are not packed here; confirm callers guarantee this.
    kernel_tm_packn.create(inch / packn, 36, outch / packn, (size_t)2u * packn * packn, packn * packn);

    for (int q = 0; q + (packn - 1) < outch; q += packn)
    {
        Mat g0 = kernel_tm_packn.channel(q / packn);

        for (int k = 0; k < 36; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + (packn - 1) < inch; p += packn)
            {
                for (int i = 0; i < packn; i++)
                {
                    for (int j = 0; j < packn; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);
                        g00[0] = (__fp16)k00[k];
                        g00++;
                    }
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) convolution, packn fp16 storage and arithmetic.
// Same pipeline as the F(6x6) variant but with 6x6 tiles / 36 transform
// points: pad input -> transform input tiles -> batched dot -> inverse
// transform -> crop to top_blob size.
static void conv3x3s1_winograd42_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    // packn = fp16 lanes per vector register; vl fixes the vector length once
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2 (each 6x6 tile produces a 4x4 output patch, 2-pixel overlap)
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd42_transform_input_packn_fp16sa_rvv(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute: regroup tiles into batches of 8/4/2/1 for sequential reads
        // in the dot loops below
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    // scalar interleave fallback for the C906 core
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr[4] = r0[l + packn * 4];
                        tmpptr[5] = r0[l + packn * 5];
                        tmpptr[6] = r0[l + packn * 6];
                        tmpptr[7] = r0[l + packn * 7];
                        tmpptr += 8;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    // 8-way segment store interleaves the 8 tiles lane-wise
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vfloat16m1_t _val4 = vle16_v_f16m1(r0 + packn * 4, vl);
                    vfloat16m1_t _val5 = vle16_v_f16m1(r0 + packn * 5, vl);
                    vfloat16m1_t _val6 = vle16_v_f16m1(r0 + packn * 6, vl);
                    vfloat16m1_t _val7 = vle16_v_f16m1(r0 + packn * 7, vl);
                    vsseg8e16_v_f16m1x8(tmpptr, vcreate_f16m1x8(_val0, _val1, _val2, _val3, _val4, _val5, _val6, _val7), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 8;
#endif
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr[2] = r0[l + packn * 2];
                        tmpptr[3] = r0[l + packn * 3];
                        tmpptr += 4;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vfloat16m1_t _val2 = vle16_v_f16m1(r0 + packn * 2, vl);
                    vfloat16m1_t _val3 = vle16_v_f16m1(r0 + packn * 3, vl);
                    vsseg4e16_v_f16m1x4(tmpptr, vcreate_f16m1x4(_val0, _val1, _val2, _val3), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 4;
#endif
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
#if C906
                    for (int l = 0; l < packn; l++)
                    {
                        tmpptr[0] = r0[l];
                        tmpptr[1] = r0[l + packn];
                        tmpptr += 2;
                    }
                    r0 += bottom_blob_tm.cstep * packn;
#else
                    vfloat16m1_t _val0 = vle16_v_f16m1(r0, vl);
                    vfloat16m1_t _val1 = vle16_v_f16m1(r0 + packn, vl);
                    vsseg2e16_v_f16m1x2(tmpptr, vcreate_f16m1x2(_val0, _val1), vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn * 2;
#endif
                }
            }
            for (; i < tiles; i++)
            {
                // leftover single tile: plain copy, no interleave needed
                __fp16* tmpptr = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * packn;

                for (int q = 0; q < inch; q++)
                {
                    vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                    vse16_v_f16m1(tmpptr, _val, vl);
                    r0 += bottom_blob_tm.cstep * packn;
                    tmpptr += packn;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // dot: multiply-accumulate each transformed tile against the matching
        // transformed kernel row, 8/4/2/1 tiles at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum4 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum5 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum6 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum7 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        __fp16 val4 = *r0++;
                        __fp16 val5 = *r0++;
                        __fp16 val6 = *r0++;
                        __fp16 val7 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);
                        _sum4 = vfmacc_vf_f16m1(_sum4, val4, _w0, vl);
                        _sum5 = vfmacc_vf_f16m1(_sum5, val5, _w0, vl);
                        _sum6 = vfmacc_vf_f16m1(_sum6, val6, _w0, vl);
                        _sum7 = vfmacc_vf_f16m1(_sum7, val7, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);
                    vse16_v_f16m1(output0_tm + packn * 4, _sum4, vl);
                    vse16_v_f16m1(output0_tm + packn * 5, _sum5, vl);
                    vse16_v_f16m1(output0_tm + packn * 6, _sum6, vl);
                    vse16_v_f16m1(output0_tm + packn * 7, _sum7, vl);

                    output0_tm += packn * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum2 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum3 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        __fp16 val2 = *r0++;
                        __fp16 val3 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);
                        _sum2 = vfmacc_vf_f16m1(_sum2, val2, _w0, vl);
                        _sum3 = vfmacc_vf_f16m1(_sum3, val3, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);
                    vse16_v_f16m1(output0_tm + packn * 2, _sum2, vl);
                    vse16_v_f16m1(output0_tm + packn * 3, _sum3, vl);

                    output0_tm += packn * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum0 = vfmv_v_f_f16m1(0.f, vl);
                    vfloat16m1_t _sum1 = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val0 = *r0++;
                        __fp16 val1 = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum0 = vfmacc_vf_f16m1(_sum0, val0, _w0, vl);
                        _sum1 = vfmacc_vf_f16m1(_sum1, val1, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum0, vl);
                    vse16_v_f16m1(output0_tm + packn, _sum1, vl);

                    output0_tm += packn * 2;
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch * packn; // inch always > 0

                    vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                    for (int j = 0; j < nn; j++)
                    {
                        __fp16 val = *r0++;
                        vfloat16m1_t _w0 = vle16_v_f16m1(k0, vl);
                        _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);

                        k0 += packn;
                    }

                    vse16_v_f16m1(output0_tm, _sum, vl);

                    output0_tm += packn;
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd42_transform_output_packn_fp16sa_rvv(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
GB_unop__isinf_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isinf_bool_fc64)
// op(A') function: GB (_unop_tran__isinf_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisinf (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisinf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = GB_cisinf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = GB_cisinf(x) element-wise over the entries of A, writing bool
// results into Cx.  Handles both the full case (Ab == NULL: every position
// holds an entry) and the bitmap case (Ab marks which positions are present).
GrB_Info GB (_unop_apply__isinf_bool_fc64)
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,                 // number of entries (or bitmap length)
    int nthreads                 // OpenMP thread count for the apply loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        // full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisinf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions that hold no entry
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = GB_cisinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isinf(A'): transpose, typecast, and apply the unary operator.  The
// actual loop nest lives in the shared template GB_unop_transpose.c, which is
// driven by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__isinf_bool_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace buffers for the transpose
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
thdat95.c | /*
* Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the
* following conditions are met:
*
* 1. Redistributions of source code must retain this list
* of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce this
* list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <config.h>
#include <stdlib.h>
#include <thtk/thtk.h>
#include "thcrypt.h"
#include "thdat.h"
#include "thlzss.h"
#include "util.h"
#include "dattypes.h"
/*
 * Hash an entry name to one of the 8 crypt-parameter slots by summing its
 * bytes and keeping the low 3 bits.  Uses an unsigned accumulator so the
 * wrap-around is well defined (plain `char` signedness and its overflow
 * behavior are implementation-defined); the low 3 bits are unchanged.
 */
static unsigned int
th95_get_crypt_param_index(
    const char *name)
{
    unsigned char index = 0;
    while (*name) index += (unsigned char)*name++;
    return index & 7;
}
/*
 * Read and decrypt a TH95-format ("THA1") archive header and entry table.
 * Returns 1 on success, 0 on failure (with *error set by the failing call).
 *
 * Layout: an encrypted fixed-size header, then an LZSS-compressed, encrypted
 * entry table of header.zsize bytes located at the end of the stream.  The
 * header's size/zsize/entry_count fields are stored with constant offsets
 * added (obfuscation), which are subtracted below.
 */
static int
th95_open(
    thdat_t* thdat,
    thtk_error_t** error)
{
    th95_archive_header_t header;

    if (thtk_io_read(thdat->stream, &header, sizeof(header), error) == -1)
        return 0;
    th_decrypt((unsigned char*)&header, sizeof(header), 0x1b, 0x37,
        sizeof(header), sizeof(header));

    if (strncmp((const char*)header.magic, "THA1", 4) != 0) {
        thtk_error_new(error, "wrong magic for archive");
        return 0;
    }

    /* Undo the constant obfuscation offsets baked into the header fields. */
    header.size -= 123456789;
    header.zsize -= 987654321;
    header.entry_count -= 135792468;

    /* The compressed entry table sits at the very end of the stream. */
    if (thtk_io_seek(thdat->stream, -(off_t)header.zsize, SEEK_END, error) == -1)
        return 0;

    /* NOTE(review): malloc result is not checked before use. */
    unsigned char* zdata = malloc(header.zsize);
    if (thtk_io_read(thdat->stream, zdata, header.zsize, error) != header.zsize) {
        free(zdata);
        return 0;
    }

    th_decrypt(zdata, header.zsize, 0x3e, 0x9b, 0x80, header.zsize);

    /* NOTE(review): on the early-return error paths below, zdata and the
     * streams appear to leak (unless thtk_io_open_memory takes ownership of
     * zdata — confirm against the thtk_io API). */
    thtk_io_t* zdata_stream = thtk_io_open_memory(zdata, header.zsize, error);
    if (!zdata_stream)
        return 0;
    thtk_io_t* data_stream = thtk_io_open_growing_memory(error);
    if (!data_stream)
        return 0;
    /* Decompress the entry table into data_stream. */
    if (th_unlzss(zdata_stream, data_stream, header.size, error) == -1)
        return 0;
    thtk_io_close(zdata_stream);

    /* NOTE(review): data and data_stream leak on the two early returns below. */
    unsigned char* data = malloc(header.size);
    if (thtk_io_seek(data_stream, 0, SEEK_SET, error) == -1)
        return 0;
    if (thtk_io_read(data_stream, data, header.size, error) != header.size)
        return 0;
    thtk_io_close(data_stream);

    thdat->entry_count = header.entry_count;
    thdat->entries = calloc(header.entry_count, sizeof(thdat_entry_t));

    if (header.entry_count) {
        thdat_entry_t* prev = NULL;
        const uint32_t* ptr = (uint32_t*)data;
        for (uint32_t i = 0; i < header.entry_count; ++i) {
            thdat_entry_t* entry = &thdat->entries[i];
            thdat_entry_init(entry);

            /* Entry record: NUL-terminated name padded to a 4-byte boundary,
             * then offset, uncompressed size, and an extra word. */
            strcpy(entry->name, (char*)ptr);
            ptr = (uint32_t*)((char*)ptr + strlen(entry->name) + (4 - strlen(entry->name) % 4));
            entry->offset = *ptr++;
            entry->size = *ptr++;
            /* Zero. */
            entry->extra = *ptr++;

            /* Compressed size is derived from the gap to the next entry. */
            if (prev)
                prev->zsize = entry->offset - prev->offset;
            prev = entry;
        }

        /* Last entry extends to the start of the entry table. */
        off_t filesize = thtk_io_seek(thdat->stream, 0, SEEK_END, error);
        if (filesize == -1)
            return 0;
        prev->zsize = (filesize - header.zsize) - prev->offset;
    }
    free(data);

    return 1;
}
/* Decrypt an entry's stored bytes in place, selecting the per-game
 * crypt-parameter table from the archive version and the table slot
 * from the entry name. */
static void
th95_decrypt_data(
    thdat_t* archive,
    thdat_entry_t* entry,
    unsigned char* data)
{
    const crypt_params_t* params;
    switch (archive->version) {
    case 95: case 10: case 103: case 11:
        params = th95_crypt_params;
        break;
    case 12: case 125: case 128:
        params = th12_crypt_params;
        break;
    case 13:
        params = th13_crypt_params;
        break;
    default:
        params = th14_crypt_params;
        break;
    }
    const unsigned int slot = th95_get_crypt_param_index(entry->name);
    th_decrypt(data, entry->zsize, params[slot].key, params[slot].step,
        params[slot].block, params[slot].limit);
}
/* Extract entry entry_index into `output`.  The compressed bytes are
 * read under an OpenMP critical section (the archive stream is shared
 * across worker threads), decrypted, and LZSS-expanded unless the entry
 * was stored uncompressed (zsize == size).  Returns 1 on success, -1 on
 * error.
 * NOTE(review): zdata is never freed directly after being wrapped,
 * which suggests zdata_stream owns it -- confirm against thtk_io.
 * Several early returns below leak buffers/streams; TODO review. */
static ssize_t
th95_read(
    thdat_t* thdat,
    int entry_index,
    thtk_io_t* output,
    thtk_error_t** error)
{
    thdat_entry_t* entry = &thdat->entries[entry_index];
    unsigned char* data;
    unsigned char* zdata = malloc(entry->zsize);
    int failed = 0;
    /* Seek+read must not interleave with other threads on the stream. */
#pragma omp critical
{
    failed = (thtk_io_seek(thdat->stream, entry->offset, SEEK_SET, error) == -1) ||
        (thtk_io_read(thdat->stream, zdata, entry->zsize, error) != entry->zsize);
}
    if (failed)
        return -1;
    th95_decrypt_data(thdat, entry, zdata);
    if (entry->zsize == entry->size) {
        /* Stored uncompressed: the decrypted bytes are the payload. */
        data = zdata;
    } else {
        thtk_io_t* zdata_stream = thtk_io_open_memory(zdata, entry->zsize, error);
        if (!zdata_stream)
            return -1;
        thtk_io_t* data_stream = thtk_io_open_growing_memory(error);
        if (!data_stream)
            return -1;
        if (th_unlzss(zdata_stream, data_stream, entry->size, error) == -1)
            return -1;
        thtk_io_close(zdata_stream);
        if (thtk_io_seek(data_stream, 0, SEEK_SET, error) == -1)
            return -1;
        data = malloc(entry->size);
        if (thtk_io_read(data_stream, data, entry->size, error) != entry->size)
            return -1;
        thtk_io_close(data_stream);
    }
    if (thtk_io_write(output, data, entry->size, error) == -1)
        return -1;
    free(data);
    return 1;
}
/* Begin writing a new archive: entry data starts right after the
 * 16-byte encrypted header, so position the stream there.
 * Returns 1 on success, 0 on seek failure. */
static int
th95_create(
    thdat_t* thdat,
    thtk_error_t** error)
{
    thdat->offset = 16;
    return thtk_io_seek(thdat->stream, thdat->offset, SEEK_SET, error) != -1;
}
/* Encrypt an entry's (possibly compressed) bytes in place, selecting
 * the per-game crypt-parameter table from the archive version and the
 * table slot from the entry name.  Mirror image of th95_decrypt_data. */
static void
th95_encrypt_data(
    thdat_t* archive,
    thdat_entry_t* entry,
    unsigned char* data)
{
    const crypt_params_t* params;
    switch (archive->version) {
    case 95: case 10: case 103: case 11:
        params = th95_crypt_params;
        break;
    case 12: case 125: case 128:
        params = th12_crypt_params;
        break;
    case 13:
        params = th13_crypt_params;
        break;
    default:
        params = th14_crypt_params;
        break;
    }
    const unsigned int slot = th95_get_crypt_param_index(entry->name);
    th_encrypt(data, entry->zsize, params[slot].key, params[slot].step,
        params[slot].block, params[slot].limit);
}
/* Compress (LZSS) and encrypt one entry's data from `input` and append
 * it to the archive.  If compression would not shrink the data, the raw
 * bytes are stored instead (zsize == size).  The stream write and the
 * shared offset bookkeeping are serialized across threads.
 * Returns the number of bytes written, or -1 on error.
 * NOTE(review): several early returns leak `data`/`data_stream`, and
 * malloc results are unchecked -- TODO review cleanup. */
static ssize_t
th95_write(
    thdat_t* thdat,
    int entry_index,
    thtk_io_t* input,
    size_t input_length,
    thtk_error_t** error)
{
    thdat_entry_t* entry = &thdat->entries[entry_index];
    unsigned char* data;
    /* Remember where the caller's data begins so we can re-read it raw
     * if compression turns out to be a loss. */
    off_t first_offset = thtk_io_seek(input, 0, SEEK_CUR, error);
    if (first_offset == -1)
        return -1;
    entry->size = input_length;
    thtk_io_t* data_stream = thtk_io_open_growing_memory(error);
    if (!data_stream)
        return -1;
    if ((entry->zsize = th_lzss(input, entry->size, data_stream, error)) == -1)
        return -1;
    if (entry->zsize >= entry->size) {
        /* Compression did not help: store the original bytes verbatim. */
        thtk_io_close(data_stream);
        if (thtk_io_seek(input, first_offset, SEEK_SET, error) == -1)
            return -1;
        data = malloc(entry->size);
        if (thtk_io_read(input, data, entry->size, error) != entry->size)
            return -1;
        entry->zsize = entry->size;
    } else {
        data = malloc(entry->zsize);
        if (thtk_io_seek(data_stream, 0, SEEK_SET, error) == -1)
            return -1;
        int ret = thtk_io_read(data_stream, data, entry->zsize, error);
        if (ret != entry->zsize)
            return -1;
        thtk_io_close(data_stream);
    }
    th95_encrypt_data(thdat, entry, data);
    int failed = 0;
    /* Append atomically: the write and the offset update must not
     * interleave with other writer threads. */
#pragma omp critical
{
    failed = (thtk_io_write(thdat->stream, data, entry->zsize, error) != entry->zsize);
    if (!failed) {
        entry->offset = thdat->offset;
        thdat->offset += entry->zsize;
    }
}
    free(data);
    if (failed)
        return -1;
    return entry->zsize;
}
/* Finalize the archive: serialize the entry list, LZSS-compress and
 * encrypt it, append it after the entry data, then rewind and write the
 * obfuscated THA1 header at the start of the file.
 * Returns 1 on success, 0 on error.
 * NOTE(review): `buffer` is never freed directly; if buffer_stream does
 * not take ownership this is a leak -- confirm against thtk_io.  Early
 * returns also leak zbuffer/streams.  mempcpy() is a GNU extension. */
static int
th95_close(
    thdat_t* thdat,
    thtk_error_t** error)
{
    unsigned char* buffer;
    unsigned int i;
    unsigned char* zbuffer;
    uint32_t header[4];
    ssize_t list_size = 0;
    ssize_t list_zsize = 0;
    /* Each record: name padded to 4 bytes + three uint32 words. */
    for (i = 0; i < thdat->entry_count; ++i) {
        const size_t namelen = strlen(thdat->entries[i].name);
        list_size += (sizeof(uint32_t) * 3) + namelen + (4 - namelen % 4);
    }
    if (list_size == 0) {
        thtk_error_new(error, "no entries");
        return 0;
    }
    buffer = malloc(list_size);
    uint32_t* buffer_ptr = (uint32_t*)buffer;
    for (i = 0; i < thdat->entry_count; ++i) {
        const thdat_entry_t* entry = &thdat->entries[i];
        const size_t namelen = strlen(entry->name);
        buffer_ptr = mempcpy(buffer_ptr, entry->name,
            namelen + (4 - namelen % 4));
        *buffer_ptr++ = entry->offset;
        *buffer_ptr++ = entry->size;
        *buffer_ptr++ = 0;
    }
    thtk_io_t* buffer_stream = thtk_io_open_memory(buffer, list_size, error);
    if (!buffer_stream)
        return 0;
    thtk_io_t* zbuffer_stream = thtk_io_open_growing_memory(error);
    if (!zbuffer_stream)
        return 0;
    if ((list_zsize = th_lzss(buffer_stream, list_size, zbuffer_stream, error)) == -1)
        return 0;
    thtk_io_close(buffer_stream);
    zbuffer = malloc(list_zsize);
    if (thtk_io_seek(zbuffer_stream, 0, SEEK_SET, error) == -1)
        return 0;
    if (thtk_io_read(zbuffer_stream, zbuffer, list_zsize, error) == -1)
        return 0;
    thtk_io_close(zbuffer_stream);
    th_encrypt(zbuffer, list_zsize, 0x3e, 0x9b, 0x80, list_size);
    /* Entry list goes at the stream's current position (end of data). */
    if (thtk_io_write(thdat->stream, zbuffer, list_zsize, error) == -1) {
        free(zbuffer);
        return 0;
    }
    free(zbuffer);
    if (thtk_io_seek(thdat->stream, 0, SEEK_SET, error) == -1)
        return 0;
    /* Header fields carry the additive obfuscation th95_open removes. */
    memcpy(&header[0], "THA1", 4);
    header[1] = list_size + 123456789;
    header[2] = list_zsize + 987654321;
    header[3] = thdat->entry_count + 135792468;
    th_encrypt((unsigned char*)&header, sizeof(header), 0x1b, 0x37,
        sizeof(header), sizeof(header));
    if (thtk_io_write(thdat->stream, &header, sizeof(header), error) == -1)
        return 0;
    return 1;
}
/* Module descriptor exported to the thdat dispatcher: the handler set
 * for th09.5-and-later "THA1" archives. */
const thdat_module_t archive_th95 = {
    THDAT_BASENAME,
    th95_open,
    th95_create,
    th95_close,
    th95_read,
    th95_write
};
|
cpu_rnnt.h | #pragma once
#include <tuple>
#include <cmath>
#include <cstring>
#include <limits>
#include <algorithm>
#include <numeric>
#include <chrono>
#if !defined(RNNT_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "rnnt_helper.h"
// CPU implementation of a transducer-style sequence loss: computes
// per-utterance negative log-likelihoods (and optionally gradients
// w.r.t. the activations) via alpha/beta recursions in log space.
template<typename ProbT>
class CpuRNNT {
public:
    // Noncopyable
    // minibatch:     number of utterances per call
    // alphabet_size: number of output symbols including blank
    // workspace:     caller-provided scratch buffer sized to hold the
    //                denom/alpha/beta arrays for the whole minibatch
    // blank:         index of the blank symbol
    // num_threads:   OpenMP thread count; <= 0 selects omp_get_max_threads()
    CpuRNNT(int minibatch, int alphabet_size, void* workspace,
            int blank, int num_threads) :
        minibatch_(minibatch), alphabet_size_(alphabet_size),
        workspace_(workspace), blank_(blank), num_threads_(num_threads) {
#if defined(RNNT_DISABLE_OMP) || defined(APPLE)
#else
        if (num_threads > 0) {
            omp_set_num_threads(num_threads);
        } else {
            num_threads_ = omp_get_max_threads();
        }
#endif
    };
    CpuRNNT(const CpuRNNT&) = delete;
    CpuRNNT& operator=(const CpuRNNT&) = delete;
    // Computes costs[mb] and fills grads for every minibatch element.
    rnntStatus_t cost_and_grad(const ProbT* const acts,
                               ProbT* grads,
                               ProbT* costs,
                               const int* const flat_labels,
                               const int* const label_lengths,
                               const int* const input_lengths);
    // Forward-only scoring: fills costs without computing gradients.
    rnntStatus_t score_forward(const ProbT* const acts,
                               ProbT* costs,
                               const int* const flat_labels,
                               const int* const label_lengths,
                               const int* const input_lengths);
private:
    // Maps (t, u[, v]) lattice coordinates to flat row-major offsets.
    class CpuRNNT_index {
    public:
        CpuRNNT_index(int U, int alphabet_size);
        int U;
        int alphabet_size;
        int operator()(int t, int u);
        int operator()(int t, int u, int v);
    };
    // Carves the denom/alphas/betas arrays for one utterance out of the
    // shared workspace and precomputes the log-softmax denominators.
    class CpuRNNT_metadata {
    public:
        CpuRNNT_metadata(int T, int U, void* workspace, size_t bytes_used, int alphabet_size, const ProbT* const acts, CpuRNNT_index& idx);
        ProbT* denom;
        ProbT* alphas;
        ProbT* betas;
    private:
        void setup_log_softmax_denom(const ProbT* const acts, int T, int U, int alphabet_size, CpuRNNT_index& idx);
    };
    int minibatch_;
    int alphabet_size_; // Number of characters plus blank
    void* workspace_;
    int blank_;
    int num_threads_;
    // Per-utterance worker: full forward/backward plus gradient.
    ProbT cost_and_grad_kernel(const ProbT* const acts, ProbT* grad,
                               const int* const labels, int mb,
                               int T, int U, size_t bytes_used);
    ProbT compute_alphas(const ProbT* const acts, const int* const labels, int T, int U, const ProbT* const denom, ProbT* alphas);
    ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const acts,
                                 int T, int U, const ProbT* const denom,
                                 ProbT* alphas, ProbT* betas,
                                 const int* const labels, ProbT logll);
};
// Carve three contiguous T*U arrays (denom, alphas, betas) out of the
// caller-provided workspace, starting at byte offset bytes_used, then
// precompute the log-softmax denominators from the activations.
template<typename ProbT>
CpuRNNT<ProbT>::CpuRNNT_metadata::CpuRNNT_metadata(int T, int U, void* workspace, size_t bytes_used, int alphabet_size, const ProbT* const acts, CpuRNNT_index& idx) {
    char* base = static_cast<char*>(workspace) + bytes_used;
    const size_t span = sizeof(ProbT) * T * U;
    denom  = reinterpret_cast<ProbT*>(base);
    alphas = reinterpret_cast<ProbT*>(base + span);
    betas  = reinterpret_cast<ProbT*>(base + 2 * span);
    setup_log_softmax_denom(acts, T, U, alphabet_size, idx);
}
// Compute the log-softmax denominator for every (t, u) grid cell:
// denom[t][u] = -log(sum_v exp(acts[t][u][v])), evaluated stably by
// shifting by the per-cell maximum before exponentiating.
template<typename ProbT>
void
CpuRNNT<ProbT>::CpuRNNT_metadata::setup_log_softmax_denom(const ProbT* const acts, int T, int U, int alphabet_size, CpuRNNT_index& idx) {
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            // Pass 1: the maximum logit, for numerical stability.
            ProbT m = rnnt_helper::neg_inf<ProbT>();
            for (int v = 0; v < alphabet_size; v++)
                m = std::max(m, acts[idx(t, u, v)]);
            // Pass 2: log-sum-exp of the shifted logits.
            ProbT lse = rnnt_helper::neg_inf<ProbT>();
            for (int v = 0; v < alphabet_size; v++)
                lse = rnnt_helper::log_sum_exp<ProbT>(lse, acts[idx(t, u, v)] - m);
            denom[idx(t, u)] = -m - lse;
        }
    }
#if defined(DEBUG_KERNEL)
    printf("cpu acts and denoms\n");
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U; u++) {
            for (int v = 0; v < alphabet_size; v++) {
                printf("%.4f ", acts[idx(t, u, v)]);
            }
            printf("=> %.4f; ", denom[idx(t, u)]);
        }
        printf("\n");
    }
    printf("\n");
#endif
}
template<typename ProbT>
CpuRNNT<ProbT>::CpuRNNT_index::CpuRNNT_index(int U, int alphabet_size) :
    U(U), alphabet_size(alphabet_size) {}

// Row-major offset of lattice cell (t, u) in a T x U array.
template<typename ProbT>
inline int CpuRNNT<ProbT>::CpuRNNT_index::operator()(int t, int u) {
    return t * U + u;
}

// Row-major offset of logit (t, u, v) in a T x U x alphabet_size array.
template<typename ProbT>
inline int CpuRNNT<ProbT>::CpuRNNT_index::operator()(int t, int u, int v) {
    return (t * U + u) * alphabet_size + v;
}
// Full pass for one minibatch element: set up the workspace slice,
// zero the gradient, run the forward (alpha) and backward (beta +
// gradient) recursions, and return the negative log-likelihood.
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::cost_and_grad_kernel(const ProbT* const acts, ProbT* grad,
                                     const int* const labels,
                                     int mb, int T, int U, size_t bytes_used) {
    CpuRNNT_index idx(U, alphabet_size_);
    CpuRNNT_metadata rnntm(T, U, workspace_, bytes_used, alphabet_size_, acts, idx);
    // zero grads
    memset(grad, 0, sizeof(ProbT) * T * U * alphabet_size_);
    ProbT llForward = compute_alphas(acts, labels, T, U, rnntm.denom, rnntm.alphas);
    ProbT llBackward = compute_betas_and_grad(grad, acts,
                                              T, U,
                                              rnntm.denom,
                                              rnntm.alphas,
                                              rnntm.betas,
                                              labels,
                                              llForward);
    // Sanity check: both recursions should agree on the total
    // log-likelihood up to numerical error.
    ProbT diff = std::abs(llForward - llBackward);
    if (diff > 1e-1) {
        printf("WARNING: Forward backward likelihood mismatch %f\n", diff);
    }
    return -llForward;
}
// Forward recursion over the (T x U) lattice in log space.
// alphas[t][u] = log-prob of having consumed t frames and emitted the
// first u labels.  Both transitions consume one frame and read the
// activations of the cell moved FROM; the label transition additionally
// advances u.  Returns the total log-likelihood: the final cell plus
// its closing blank.
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::compute_alphas(const ProbT* const acts, const int* const labels, int T, int U, const ProbT* const denom, ProbT* alphas) {
    CpuRNNT_index idx(U, alphabet_size_);
    // Row t = 0: no frames consumed yet, so only (0, 0) is reachable.
    alphas[0] = 0;
    for (int u = 1; u < U; ++u) {
        alphas[u] = rnnt_helper::neg_inf<ProbT>();
    }
    for (int t = 1; t < T; ++t) {
        // Column u = 0: only blank transitions from (t-1, 0).
        alphas[idx(t, 0)] = alphas[idx(t-1, 0)] + acts[idx(t-1, 0, blank_)] + denom[idx(t-1, 0)];
        for (int u = 1; u < U; ++u) {
            ProbT no_emit = alphas[idx(t-1, u)] + acts[idx(t-1, u, blank_)] + denom[idx(t-1, u)];
            ProbT emit = alphas[idx(t-1, u-1)] + acts[idx(t-1, u-1, labels[u-1])] + denom[idx(t-1, u-1)];
            alphas[idx(t, u)] = rnnt_helper::log_sum_exp<ProbT>(emit, no_emit);
        }
    }
#ifdef DEBUG_KERNEL
    printf("cpu alphas:\n");
    printf("%d %d\n", T, U);
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U; u++) {
            printf("%.2f ", alphas[idx(t, u)]);
        }
        printf("\n");
    }
    printf("\n");
#endif
    ProbT loglike = alphas[idx(T-1, U-1)] + acts[idx(T-1, U-1, blank_)] + denom[idx(T-1, U-1)];
    return loglike;
}
// Backward recursion plus gradient accumulation in log space.
// betas[t][u] = log-prob of completing the alignment from cell (t, u).
// `logll` is the forward log-likelihood used to normalize posterior
// terms.  Returns the backward log-likelihood betas[0][0].
template<typename ProbT>
ProbT
CpuRNNT<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const acts, int T, int U,
                                       const ProbT* const denom, ProbT* alphas, ProbT* betas,
                                       const int* const labels, ProbT logll) {
    CpuRNNT_index idx(U, alphabet_size_);
    // The final cell closes with a blank; other last-row cells cannot
    // finish the transcription and are -inf.
    betas[idx(T-1, U-1)] = acts[idx(T-1, U-1, blank_)] + denom[idx(T-1, U-1)];
    for (int u = 0; u < U-1; ++u) {
        betas[idx(T-1, u)] = rnnt_helper::neg_inf<ProbT>();
    }
    for (int t = T-2; t >= 0; --t) {
        // Column u = U-1: only blanks remain.
        betas[idx(t, U-1)] = betas[idx(t+1, U-1)] + acts[idx(t, U-1, blank_)] + denom[idx(t, U-1)];
        for (int u = 0; u < U-1; ++u) {
            ProbT no_emit = betas[idx(t+1, u)] + acts[idx(t, u, blank_)] + denom[idx(t, u)];
            ProbT emit = betas[idx(t+1, u+1)] + acts[idx(t, u, labels[u])] + denom[idx(t, u)];
            betas[idx(t, u)] = rnnt_helper::log_sum_exp<ProbT>(emit, no_emit);
        }
    }
#ifdef DEBUG_KERNEL
    printf("cpu betas:\n");
    printf("%d %d\n", T, U);
    for (int t = 0; t < T; t++) {
        for (int u = 0; u < U; u++) {
            printf("%.2f ", betas[idx(t, u)]);
        }
        printf("\n");
    }
    printf("\n");
#endif
    ProbT loglike = betas[0];
    // Gradients w.r.t. log probabilities
    // For each logit: cell occupancy (alpha*beta/P) times its softmax
    // probability, minus the mass of the transition actually taken
    // through that logit (blank to (t+1,u), or label to (t+1,u+1)).
    for (int t = 0; t < T - 1; ++t) {
        for (int u = 0; u < U; ++u) {
            for (int v = 0; v < alphabet_size_; ++v) {
                ProbT g = std::exp(acts[idx(t, u, v)] + denom[idx(t, u)] + alphas[idx(t, u)] + betas[idx(t, u)] - loglike);
                if (v == blank_) {
                    g -= std::exp(acts[idx(t, u, v)] + denom[idx(t, u)] + alphas[idx(t, u)] + betas[idx(t+1, u)] - loglike);
                } else if (u < U-1 && v == labels[u]) {
                    g -= std::exp(acts[idx(t, u, v)] + denom[idx(t, u)] + alphas[idx(t, u)] + betas[idx(t+1, u+1)] - loglike);
                }
                grad[idx(t, u, v)] = g;
            }
        }
    }
    // Last cell (T-1, U-1): the only valid move is blank, so every
    // non-blank logit receives its softmax probability as gradient.
    for (int v = 0; v < alphabet_size_; ++v) {
        if (v != blank_) {
            grad[idx(T-1, U-1, v)] = std::exp(acts[idx(T-1, U-1, v)] + denom[idx(T-1, U-1)]);
        }
    }
#if defined(DEBUG_KERNEL)
    printf("cpu grads\n");
    int V = alphabet_size_;
    for (int t = 0; t < T; ++t) {
        for (int u = 0; u < U; ++u) {
            for (int v = 0; v < V; ++v) {
                printf("%.2f ", grad[(t*U + u) * V + v]);
            }
            printf("; ");
        }
        printf("\n");
    }
    printf("\n");
#endif
    return loglike;
}
// Public entry point: computes costs[mb] (negative log-likelihood) and
// the gradients for every minibatch element, parallelized across the
// minibatch with OpenMP.
template<typename ProbT>
rnntStatus_t
CpuRNNT<ProbT>::cost_and_grad(const ProbT* const acts,
                              ProbT* grads,
                              ProbT* costs,
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths) {
    // Prefix sums: byte offset of each element's workspace slice and
    // element offset of its activations.  NOTE(review): VLAs -- assumes
    // minibatch_ is small enough not to overflow the stack.
    size_t bytes_used[minibatch_ + 1];
    size_t start_indices[minibatch_ + 1];
    bytes_used[0] = 0;
    start_indices[0] = 0;
    int max_U = 0;
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb];
        const int U = label_lengths[mb] + 1;
        // alphas & betas; log-softmax denom
        bytes_used[mb + 1] = bytes_used[mb] + sizeof(ProbT) * T * U * 3;
        start_indices[mb + 1] = start_indices[mb] + T * U * alphabet_size_;
        max_U = std::max(U, max_U);
    }
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int U = label_lengths[mb] + 1; // Number of labels in transcription
        // flat_labels rows are padded to the longest transcript.
        costs[mb] = cost_and_grad_kernel(acts + start_indices[mb],
                                         grads + start_indices[mb],
                                         flat_labels + mb * (max_U - 1),
                                         mb, T, U, bytes_used[mb]);
    }
    return RNNT_STATUS_SUCCESS;
}
// Forward-only scoring: fills costs[mb] = -log p(labels | acts) for
// every minibatch element without computing gradients.  Parallelized
// across the minibatch with OpenMP, mirroring cost_and_grad().
template<typename ProbT>
rnntStatus_t
CpuRNNT<ProbT>::score_forward(const ProbT* const acts,
                              ProbT* costs,
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths) {
    // Prefix sums: byte offset of each element's workspace slice and
    // element offset of its activations.  NOTE(review): VLAs -- assumes
    // minibatch_ is small enough not to overflow the stack.
    size_t bytes_used[minibatch_ + 1];
    size_t start_indices[minibatch_ + 1];
    bytes_used[0] = 0;
    start_indices[0] = 0;
    int max_U = 0;
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb];
        const int U = label_lengths[mb] + 1;
        // alphas & betas; log-softmax denom
        bytes_used[mb + 1] = bytes_used[mb] + sizeof(ProbT) * T * U * 3;
        start_indices[mb + 1] = start_indices[mb] + T * U * alphabet_size_;
        // BUG FIX: the return value of std::max was previously discarded
        // (`std::max(max_U, U);`), leaving max_U at 0 so the flat_labels
        // offset below became negative.  Assign it, as cost_and_grad does.
        max_U = std::max(max_U, U);
    }
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int U = label_lengths[mb] + 1; // Number of labels in transcription
        CpuRNNT_index idx(U, alphabet_size_);
        CpuRNNT_metadata rnntm(T, U, workspace_, bytes_used[mb], alphabet_size_, acts + start_indices[mb], idx);
        // flat_labels rows are padded to the longest transcript.
        costs[mb] = -compute_alphas(acts + start_indices[mb], flat_labels + mb * (max_U - 1), T, U, rnntm.denom, rnntm.alphas);
    }
    return RNNT_STATUS_SUCCESS;
}
|
vbHmm_Common.c | /*
* vbHmm_Common.c
* Common VB-HMM engine.
* Reference:
* Christopher M. Bishop, "Pattern Recognition and Machine Learning", Springer, 2006
*
* Created by OKAMOTO Kenji, SAKO Yasushi and RIKEN
* Copyright 2011-2015
* Cellular Informatics Laboratory, Advance Science Institute, RIKEN, Japan.
* All rights reserved.
*
* Ver. 1.1.0
* Last modified on 2016.11.04
*/
#include "vbHmm_Common.h"
#include <string.h>
#include <float.h>
// Pointers of functions to call model-specific functions.
/* Model-specific hook table.  All hooks start NULL; setFunctions() must
 * install a model's callbacks before any analysis runs. */
new_model_parameters_func newModelParameters = NULL;
free_model_parameters_func freeModelParameters = NULL;
new_model_stats_func newModelStats = NULL;
free_model_stats_func freeModelStats = NULL;
initialize_vbHmm_func initializeVbHmm = NULL;
pTilde_z1_func pTilde_z1 = NULL;
pTilde_zn_zn1_func pTilde_zn_zn1 = NULL;
pTilde_xn_zn_func pTilde_xn_zn = NULL;
calcStatsVars_func calcStatsVars = NULL;
maximization_func maximization = NULL;
varLowerBound_func varLowerBound = NULL;
reorderParameters_func reorderParameters = NULL;
outputResults_func outputResults = NULL;
// This function must be called to connect with the model before executing analysis.
/* Install the model-specific callback set into the global hook table.
 * Must be called once, before any analysis, so the generic VB-HMM
 * engine can dispatch into the chosen model. */
void setFunctions( funcs )
commonFunctions funcs;
{
    newModelParameters = funcs.newModelParameters;
    freeModelParameters = funcs.freeModelParameters;
    newModelStats = funcs.newModelStats;
    freeModelStats = funcs.freeModelStats;
    initializeVbHmm = funcs.initializeVbHmm;
    pTilde_z1 = funcs.pTilde_z1;
    pTilde_zn_zn1 = funcs.pTilde_zn_zn1;
    pTilde_xn_zn = funcs.pTilde_xn_zn;
    calcStatsVars = funcs.calcStatsVars;
    maximization = funcs.maximization;
    varLowerBound = funcs.varLowerBound;
    reorderParameters = funcs.reorderParameters;
    outputResults = funcs.outputResults;
}
////////////////////////////////////////////////////////////////// VB-HMM Execution Functions
/* Model selection: run VB-HMM over state counts sFrom..sTo with
 * `trials` restarts each, output the best trial per state count, and
 * return the state count with the highest variational lower bound.
 * Also dumps every trial's lower bound to "<name>.LqVsK". */
int modelComparison( xn, sFrom, sTo, trials, maxIteration, threshold, logFP )
xnDataSet *xn;
int sFrom, sTo, trials;
int maxIteration;
double threshold;
FILE *logFP;
{
    int s, t;
    if( logFP != NULL ){
        fprintf( logFP, " No. of states from %d to %d, trials = %d, ", sFrom, sTo, trials);
        fprintf( logFP, " analyze: maxIteration = %d, threshold = %g \n\n", maxIteration, threshold);
    }
    double *LqVsK = malloc( trials * (sTo - sFrom + 1) * sizeof(double) );
    int maxS = 0;
    double maxLq = -DBL_MAX;
    globalVars **gvArray = (globalVars**)malloc( trials * sizeof(globalVars*) );
    indVars **ivArray = (indVars**)malloc( trials * sizeof(indVars*) );
    for( s = sFrom ; s <= sTo ; s++ ){
#ifdef _OPENMP
#pragma omp parallel for private(t)
#endif
        for( t = 0 ; t < trials ; t++ ){
            int st = (s - sFrom) * trials + t;
            gvArray[t] = newGlobalVars( xn, s );
            ivArray[t] = newIndVars( xn, gvArray[t] );
            LqVsK[st] = vbHmm_Main( xn, gvArray[t], ivArray[t], maxIteration, threshold, logFP );
            /* NOTE(review): maxLq/maxS are shared and updated here with no
             * synchronization; with OpenMP and trials > 1 this is a data
             * race -- consider a critical section. */
            if( LqVsK[st] > maxLq ){
                maxLq = LqVsK[st];
                maxS = s;
            }
        }
        /* Pick the best trial for this state count and report it. */
        /* NOTE(review): lower bounds are typically negative; starting
         * maxLqForS at 0.0 (not -DBL_MAX) can leave maxT at 0 -- verify. */
        double maxLqForS = 0.0;
        int maxT = 0;
        for( t = 0 ; t < trials ; t++ ){
            int st = (s - sFrom) * trials + t;
            if( LqVsK[st] > maxLqForS ){
                maxLqForS = LqVsK[st];
                maxT = t;
            }
        }
        (*outputResults)( xn, gvArray[maxT], ivArray[maxT], logFP );
        for( t = 0 ; t < trials ; t++ ){
            freeIndVars( xn, gvArray[t], &ivArray[t] );
            freeGlobalVars( xn, &gvArray[t] );
        }
        /* Early stop once three state counts past the current best. */
        if( s >= (maxS+3) ){
            s++;
            break;
        }
    }
    sTo = s - 1;
    free( gvArray );
    free( ivArray );
    /* Dump the lower bound of every trial to "<name>.LqVsK". */
    /* NOTE(review): strncpy does not guarantee NUL-termination when
     * xn->name is >= sizeof(fn) -- confirm name lengths are bounded. */
    char fn[256];
    FILE *fp = NULL;
    strncpy( fn, xn->name, sizeof(fn) );
    strncat( fn, ".LqVsK", sizeof(fn) - strlen(fn) - 1 );
    if( (fp = fopen( fn, "w" )) != NULL ){
        for( s = 0 ; s < trials * (sTo - sFrom + 1) ; s++ ){
            fprintf( fp, "%2d, %.20g\n", (s/trials) + sFrom, LqVsK[s] );
        }
        fclose(fp);
    }
    free( LqVsK );
    return maxS;
}
/* Allocate a globalVars record for an sNo-state model on data set xn;
 * model-specific parameters come from the installed hook. */
globalVars *newGlobalVars( xnDataSet *xn, int sNo )
{
    globalVars *g = (globalVars*)malloc( sizeof(globalVars) );
    g->sNo = sNo;
    g->iteration = 0;
    g->maxLq = 0.0;
    g->LqArr = NULL;
    g->params = (*newModelParameters)( xn, sNo );
    return g;
}
/* Destroy a record created by newGlobalVars() and NULL the pointer. */
void freeGlobalVars( xnDataSet *xn, globalVars **gv )
{
    globalVars *g = *gv;
    free( g->LqArr );
    (*freeModelParameters)( &g->params, xn, g->sNo );
    free( g );
    *gv = NULL;
}
/* Allocate the per-trial (individual) variable set for data set xn and
 * an sNo-state model: gamma/xi posteriors (zeroed), alpha/beta
 * recursion buffers, per-sample scaling factors, and cached
 * transition/emission probability tables. */
indVars *newIndVars( xnDataSet *xn, globalVars *gv )
{
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    size_t n;
    int i;
    indVars *iv = (indVars*)malloc( sizeof(indVars) );
    /* gamma: dLen x sNo, zero-initialized */
    iv->gmMat = (double**)malloc( dLen * sizeof(double*) );
    for( n = 0 ; n < dLen ; n++ ){
        iv->gmMat[n] = (double*)malloc( sNo * sizeof(double) );
        memset( iv->gmMat[n], 0, sNo * sizeof(double) );
    }
    /* xi: dLen x sNo x sNo, zero-initialized */
    iv->xiMat = (double***)malloc( dLen * sizeof(double**) );
    for( n = 0 ; n < dLen ; n++ ){
        iv->xiMat[n] = (double**)malloc( sNo * sizeof(double*) );
        for( i = 0 ; i < sNo ; i++ ){
            iv->xiMat[n][i] = (double*)malloc( sNo * sizeof(double) );
            memset( iv->xiMat[n][i], 0, sNo * sizeof(double) );
        }
    }
    /* alpha/beta buffers for the E-step plus cached emission terms */
    iv->aMat = (double**)malloc( dLen * sizeof(double*) );
    iv->bMat = (double**)malloc( dLen * sizeof(double*) );
    iv->valpXnZn = (double**)malloc( dLen * sizeof(double*) );
    for( n = 0 ; n < dLen ; n++ ){
        iv->aMat[n] = (double*)malloc( sNo * sizeof(double) );
        iv->bMat[n] = (double*)malloc( sNo * sizeof(double) );
        iv->valpXnZn[n] = (double*)malloc( sNo * sizeof(double) );
    }
    /* scaling factors for the E-step */
    iv->cn = (double*)malloc( dLen * sizeof(double) );
    /* cached transition terms (computed once per E-step) */
    iv->valpZnZn1 = (double**)malloc( sNo * sizeof(double*) );
    for( i = 0 ; i < sNo ; i++ ){
        iv->valpZnZn1[i] = (double*)malloc( sNo * sizeof(double) );
    }
    iv->stats = (*newModelStats)( xn, gv, iv );
#ifdef OUTPUT_MAX_GAMMA
    iv->gammaTraj = NULL;
#endif
    iv->stateTraj = NULL;
    return iv;
}
/* Release everything allocated by newIndVars() and NULL the pointer. */
void freeIndVars( xnDataSet *xn, globalVars *gv, indVars **iv )
{
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    size_t n;
    int i;
    indVars *p = *iv;
    /* per-sample rows: gamma, xi, alpha, beta, cached emissions */
    for( n = 0 ; n < dLen ; n++ ){
        free( p->gmMat[n] );
        for( i = 0 ; i < sNo ; i++ ){
            free( p->xiMat[n][i] );
        }
        free( p->xiMat[n] );
        free( p->aMat[n] );
        free( p->bMat[n] );
        free( p->valpXnZn[n] );
    }
    free( p->gmMat );
    free( p->xiMat );
    free( p->aMat );
    free( p->bMat );
    free( p->valpXnZn );
    free( p->cn );
    /* cached transition terms */
    for( i = 0 ; i < sNo ; i++ ){
        free( p->valpZnZn1[i] );
    }
    free( p->valpZnZn1 );
#ifdef OUTPUT_MAX_GAMMA
    free( p->gammaTraj );
#endif
    free( p->stateTraj );
    (*freeModelStats)( &p->stats, xn, gv, p );
    free( p );
    *iv = NULL;
}
////////////////////////////////////////////////////////////////// VB-HMM Common Engine
/* Core VB-EM loop for one trial: alternate E-step (forward-backward +
 * sufficient statistics + lower bound) and M-step until the relative
 * change of the variational lower bound drops below `threshold` or
 * maxIteration is reached.  Afterwards the states are reordered, the
 * Viterbi path is built, and the final bound is returned (also stored
 * in gv->maxLq, with the Lq history in gv->LqArr). */
double vbHmm_Main( xn, gv, iv ,maxIteration, threshold, logFP )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
int maxIteration;
double threshold;
FILE *logFP;
{
    double **LqArr = &gv->LqArr;
    *LqArr = realloc( *LqArr, maxIteration * sizeof(double) );
    (*initializeVbHmm)( xn, gv, iv );
    int i;
    for( i = 0 ; i < maxIteration ; i++ ){
        // E-step
        forwardBackward( xn, gv, iv );
        (*calcStatsVars)( xn, gv, iv );
        (*LqArr)[i] = (*varLowerBound)( xn, gv, iv );
        // End loop if derivative of variational lower bound reaches threshold.
        if( (i>0) && ( fabs( ((*LqArr)[i] - (*LqArr)[i-1]) / (*LqArr)[i] ) < threshold ) ){
            break;
        }
        // M-step
        (*maximization)( xn, gv, iv );
    }
    if( i == maxIteration ){
        if( logFP != NULL ){
            fprintf(logFP, "MAX iteration (%d) reached.\n", maxIteration);
        }
        /* Clamp i so the indexing below refers to the last computed bound. */
        i--;
    }
    (*reorderParameters)( xn, gv, iv );
#ifdef OUTPUT_MAX_GAMMA
    maxGamma( xn, gv, iv );
#endif
    maxSum( xn, gv, iv );
    gv->iteration = i+1;
    /* Shrink the Lq history to the iterations actually run. */
    *LqArr = realloc( *LqArr, (i+1) * sizeof(double) );
    gv->maxLq = (*LqArr)[i];
    if( logFP != NULL ){
        fprintf( logFP, " iteration: %d evidence p(x|K=%d) = %.20g \n", i+1, gv->sNo, gv->maxLq );
    }
    return gv->maxLq;
}
// Baum-Welch algorithm for E-step calculation
/* Scaled forward-backward recursion (Baum-Welch E-step).
 * Fills aMat (scaled forward), bMat (scaled backward), the per-sample
 * scaling factors cn, and from these the posteriors gmMat (gamma) and
 * xiMat (xi).  Transition terms are evaluated once per call and cached
 * in valpZnZn1; emission terms are cached per sample in valpXnZn. */
void forwardBackward( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    size_t dLen = xn->N; // number of time stamp data points
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat, ***xiMat = iv->xiMat;
    double **aMat = iv->aMat, **bMat = iv->bMat;
    double *cn = iv->cn;
    double **valpZnZn1 = iv->valpZnZn1, **valpXnZn = iv->valpXnZn;
    size_t n, i, j;
    // forward
    /* n = 0: initial-state prior times emission; cn[0] normalizes. */
    cn[0] = 0.0;
    for( i = 0 ; i < sNo ; i++ ){
        valpXnZn[0][i] = (*pTilde_xn_zn)( xn, 0, (int)i, gv->params );
        aMat[0][i] = (*pTilde_z1)( (int)i, gv->params ) * valpXnZn[0][i];
        cn[0] += aMat[0][i];
        /* Cache the (state i -> state j) transition terms once. */
        for( j = 0 ; j < sNo ; j++ ){
            valpZnZn1[i][j] = (*pTilde_zn_zn1)( (int)i, (int)j, gv->params );
        }
    }
    for( i = 0 ; i < sNo ; i++ ){
        aMat[0][i] /= cn[0];
    }
    /* n > 0: propagate, scale by cn[n] to avoid underflow. */
    for( n = 1 ; n < dLen ; n++ ){
        cn[n] = 0.0;
        for( j = 0 ; j < sNo ; j++ ){
            aMat[n][j] = 0;
            for( i = 0 ; i < sNo ; i++ ){
                aMat[n][j] += aMat[n-1][i] * valpZnZn1[i][j];
            }
            valpXnZn[n][j] = (*pTilde_xn_zn)( xn, n, (int)j, gv->params );
            aMat[n][j] *= valpXnZn[n][j];
            cn[n] += aMat[n][j];
        }
        for( j = 0 ; j < sNo ; j++ ){
            aMat[n][j] /= cn[n];
        }
    }
    // backward
    for( i = 0 ; i < sNo ; i++ ){
        bMat[dLen-1][i] = 1;
    }
    double betaTerm;
    /* Propagate backwards, reusing the cached terms and forward scales. */
    for( n = dLen-1 ; n > 0 ; ){
        n--;
        for( i = 0 ; i < sNo ; i++ ){
            bMat[n][i] = 0;
            for( j = 0 ; j < sNo ; j++ ){
                betaTerm = bMat[n+1][j];
                betaTerm *= valpZnZn1[i][j];
                betaTerm *= valpXnZn[n+1][j];
                bMat[n][i] += betaTerm;
            }
            bMat[n][i] /= cn[n+1];
        }
    }
    // update gamma
    /* gamma[n][i] = scaled alpha * scaled beta (scales cancel). */
    for( n = 0 ; n < dLen ; n++ ){
        for( i = 0 ; i < sNo ; i++ ){
            gmMat[n][i] = aMat[n][i] * bMat[n][i];
        }
    }
    // update xi
    /* xi[n][i][j]: pairwise posterior for the (n-1 -> n) transition;
     * xi at n = 0 has no predecessor and is zeroed. */
    double xiTerm;
    for( i = 0 ; i < sNo ; i++ ){
        for( j = 0 ; j < sNo ; j++ ){
            xiMat[0][i][j] = 0;
    }  }
    for( n = 1 ; n < dLen ; n++ ){
        for( i = 0 ; i < sNo ; i++ ){
            for( j = 0 ; j < sNo ; j++ ){
                xiTerm = aMat[n-1][i];
                xiTerm *= valpXnZn[n][j];
                xiTerm *= valpZnZn1[i][j];
                xiTerm *= bMat[n][j];
                xiMat[n][i][j] = xiTerm / cn[n];
            }
        }
    }
}
// construct most likely trajectory to trace max Gamma
#ifdef OUTPUT_MAX_GAMMA
/* Build a state trajectory by taking, at each time point, the state
 * with the largest posterior marginal (gamma).  The result is stored
 * in iv->gammaTraj (reallocated to fit) and returned. */
int *maxGamma( xnDataSet *xn, globalVars *gv, indVars *iv )
{
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    double **gmMat = iv->gmMat;
    size_t n;
    int i;
    iv->gammaTraj = (int*)realloc( iv->gammaTraj, dLen * sizeof(int) );
    for( n = 0 ; n < dLen ; n++ ){
        int best = 0;
        double bestG = gmMat[n][0];
        for( i = 1 ; i < sNo ; i++ ){
            if( gmMat[n][i] > bestG ){
                bestG = gmMat[n][i];
                best = i;
            }
        }
        iv->gammaTraj[n] = best;
    }
    return iv->gammaTraj;
}
#endif
// Viterbi algorithm to construct most likely trajectory
/* Viterbi (max-sum) decoding of the most likely state trajectory.
 * Forward pass fills wnMat (best log score ending in each state at
 * each time) and phiMat (argmax back-pointers); the backward pass
 * traces the pointers from the best final state.  The trajectory is
 * stored in iv->stateTraj (reallocated to fit) and returned. */
int *maxSum( xn, gv, iv )
xnDataSet *xn;
globalVars *gv;
indVars *iv;
{
    size_t dLen = xn->N;
    int sNo = gv->sNo;
    size_t n;
    int i, j;
    iv->stateTraj = (int*)realloc( iv->stateTraj, dLen * sizeof(int) );
    double **wnMat = (double **)malloc( dLen * sizeof(double*) );
    double **phiMat = (double **)malloc( dLen * sizeof(double*) );
    for( n = 0 ; n < dLen ; n++ ){
        wnMat[n] = (double*)malloc( sNo * sizeof(double) );
        phiMat[n] = (double*)malloc( sNo * sizeof(double) );
    }
    int maxI;
    double wnTest, maxWn;
    // forward
    for( n = 0 ; n < dLen ; n++ ){
        for( i = 0 ; i < sNo ; i++ ){
            wnMat[n][i] = 0.0;
            phiMat[n][i] = 0.0;
        }
    }
    /* n = 0: log prior + log emission. */
    for( i = 0 ; i < sNo ; i++ ){
        wnMat[0][i] = log((*pTilde_z1)(i, gv->params)) + log((*pTilde_xn_zn)(xn, 0, i, gv->params));
    }
    /* n > 0: best predecessor per state, with its index recorded. */
    for( n = 1 ; n < dLen ; n++ ){
        for( j = 0 ; j < sNo ; j++ ){
            maxWn = log( (*pTilde_zn_zn1)(0, j, gv->params) ) + wnMat[n-1][0];
            maxI = 0;
            for( i = 1 ; i < sNo ; i++ ){
                wnTest = log( (*pTilde_zn_zn1)(i, j, gv->params) ) + wnMat[n-1][i];
                if( wnTest > maxWn ){
                    maxWn = wnTest;
                    maxI = i;
                }
            }
            phiMat[n][j] = maxI;
            wnMat[n][j] = log((*pTilde_xn_zn)(xn, n, j, gv->params)) + maxWn;
        }
    }
    // backward
    /* Start from the best final state and follow the back-pointers. */
    maxWn = wnMat[dLen-1][0];
    maxI = 0;
    for( i = 1 ; i < sNo ; i++ ){
        if( wnMat[dLen-1][i] > maxWn ){
            maxWn = wnMat[dLen-1][i];
            maxI = i;
        }
    }
    iv->stateTraj[dLen-1] = maxI;
    for( n = dLen-1 ; n > 0 ; n-- ){
        iv->stateTraj[n-1] = phiMat[n][iv->stateTraj[n]];
    }
    for( n = 0 ; n < dLen ; n++ ){
        free( wnMat[n] );
        free( phiMat[n] );
    }
    free( wnMat );
    free( phiMat );
    return iv->stateTraj;
}
//
|
divsufsort.c | /*
* divsufsort.c for libdivsufsort-lite
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
# include <omp.h>
#endif
#include "divsufsort.h"
/*- Constants -*/
#define INLINE __inline
#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
# undef ALPHABET_SIZE
#endif
#if !defined(ALPHABET_SIZE)
# define ALPHABET_SIZE (256)
#endif
#define BUCKET_A_SIZE (ALPHABET_SIZE)
#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
#if defined(SS_INSERTIONSORT_THRESHOLD)
# if SS_INSERTIONSORT_THRESHOLD < 1
# undef SS_INSERTIONSORT_THRESHOLD
# define SS_INSERTIONSORT_THRESHOLD (1)
# endif
#else
# define SS_INSERTIONSORT_THRESHOLD (8)
#endif
/* Clamp a user-supplied SS_BLOCKSIZE into the supported range [0, 32767];
   0 selects pure multikey introsort, otherwise blocks of this size are
   sorted independently and then merged. */
#if defined(SS_BLOCKSIZE)
# if SS_BLOCKSIZE < 0
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (0)
# elif 32768 <= SS_BLOCKSIZE
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (32767)
# endif
#else
# define SS_BLOCKSIZE (1024)
#endif
/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
#if SS_BLOCKSIZE == 0
# define SS_MISORT_STACKSIZE (96)
#elif SS_BLOCKSIZE <= 4096
# define SS_MISORT_STACKSIZE (16)
#else
# define SS_MISORT_STACKSIZE (24)
#endif
/* Explicit-stack depths for the swap-merge and tandem-repeat sorts. */
#define SS_SMERGE_STACKSIZE (32)
#define TR_INSERTIONSORT_THRESHOLD (8)
#define TR_STACKSIZE (64)
/*- Macros -*/
/* Swap two values; requires a compatible temporary `t` in scope. */
#ifndef SWAP
# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
#endif /* SWAP */
#ifndef MIN
# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif /* MIN */
#ifndef MAX
# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif /* MAX */
/* Push/pop 4- and 5-field frames on the local `stack`/`ssize` of the
   enclosing function; STACK_POP* returns from that function when empty. */
#define STACK_PUSH(_a, _b, _c, _d)\
  do {\
    assert(ssize < STACK_SIZE);\
    stack[ssize].a = (_a), stack[ssize].b = (_b),\
    stack[ssize].c = (_c), stack[ssize++].d = (_d);\
  } while(0)
#define STACK_PUSH5(_a, _b, _c, _d, _e)\
  do {\
    assert(ssize < STACK_SIZE);\
    stack[ssize].a = (_a), stack[ssize].b = (_b),\
    stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
  } while(0)
#define STACK_POP(_a, _b, _c, _d)\
  do {\
    assert(0 <= ssize);\
    if(ssize == 0) { return; }\
    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
    (_c) = stack[ssize].c, (_d) = stack[ssize].d;\
  } while(0)
#define STACK_POP5(_a, _b, _c, _d, _e)\
  do {\
    assert(0 <= ssize);\
    if(ssize == 0) { return; }\
    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
    (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
  } while(0)
/* Bucket accessors: BUCKET_B is indexed (c1, c0), BUCKET_BSTAR (c0, c1);
   at ALPHABET_SIZE == 256 the index is packed with shifts instead of
   multiplication. */
#define BUCKET_A(_c0) bucket_A[(_c0)]
#if ALPHABET_SIZE == 256
#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
#else
#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
#endif
/*- Private Functions -*/
/* lg_table[i] = floor(log2(i)) for 1 <= i <= 255; lg_table[0] = -1
   (callers never pass 0).  Wider arguments are handled by shifting down
   to a byte first (see ss_ilg / tr_ilg). */
static const int lg_table[256]= {
 -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
};
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
/* Returns floor(log2(n)) for n > 0.  The lookup width is chosen to match
   the largest argument possible under the configured SS_BLOCKSIZE. */
static INLINE
int
ss_ilg(int n) {
#if SS_BLOCKSIZE == 0
  /* full 32-bit argument */
  return (n & 0xffff0000) ?
          ((n & 0xff000000) ?
            24 + lg_table[(n >> 24) & 0xff] :
            16 + lg_table[(n >> 16) & 0xff]) :
          ((n & 0x0000ff00) ?
            8 + lg_table[(n >> 8) & 0xff] :
            0 + lg_table[(n >> 0) & 0xff]);
#elif SS_BLOCKSIZE < 256
  /* argument fits in one byte */
  return lg_table[n];
#else
  /* argument fits in 16 bits (SS_BLOCKSIZE <= 32767) */
  return (n & 0xff00) ?
          8 + lg_table[(n >> 8) & 0xff] :
          0 + lg_table[(n >> 0) & 0xff];
#endif
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
#if SS_BLOCKSIZE != 0
/* Table of scaled square-root approximations used to seed ss_isqrt's
   Newton iterations. */
static const int sqq_table[256] = {
  0,  16,  22,  27,  32,  35,  39,  42,  45,  48,  50,  53,  55,  57,  59,  61,
 64,  65,  67,  69,  71,  73,  75,  76,  78,  80,  81,  83,  84,  86,  87,  89,
 90,  91,  93,  94,  96,  97,  98,  99, 101, 102, 103, 104, 106, 107, 108, 109,
110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
};
/* Fast integer square root of x >= 0, clamped to SS_BLOCKSIZE.
   e = floor(log2(x)); a table-based first guess is refined with one or
   two Newton steps for large x, and the result is corrected down by one
   when it overshoots. */
static INLINE
int
ss_isqrt(int x) {
  int y, e;
  if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
  e = (x & 0xffff0000) ?
        ((x & 0xff000000) ?
          24 + lg_table[(x >> 24) & 0xff] :
          16 + lg_table[(x >> 16) & 0xff]) :
        ((x & 0x0000ff00) ?
          8 + lg_table[(x >> 8) & 0xff] :
          0 + lg_table[(x >> 0) & 0xff]);
  if(e >= 16) {
    y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
    if(e >= 24) { y = (y + 1 + x / y) >> 1; } /* extra Newton step for very large x */
    y = (y + 1 + x / y) >> 1;
  } else if(e >= 8) {
    y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
  } else {
    return sqq_table[x] >> 4; /* x < 256: table answer, rescaled */
  }
  return (x < (y * y)) ? y - 1 : y;
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Compares two suffixes. */
static INLINE
int
ss_compare(const unsigned char *T,
           const int *p1, const int *p2,
           int depth) {
  /* Byte-wise comparison of the substrings T[depth + *p1 .. *(p1+1) + 1]
     and T[depth + *p2 .. *(p2+1) + 1].  A substring that ends first is
     smaller; two substrings that end together are equal (returns 0). */
  const unsigned char *s1 = T + depth + *p1;
  const unsigned char *s2 = T + depth + *p2;
  const unsigned char *e1 = T + *(p1 + 1) + 2;
  const unsigned char *e2 = T + *(p2 + 1) + 2;
  while((s1 < e1) && (s2 < e2) && (*s1 == *s2)) {
    ++s1;
    ++s2;
  }
  if(s1 < e1) {
    return (s2 < e2) ? *s1 - *s2 : 1;
  }
  return (s2 < e2) ? -1 : 0;
}
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
/* Insertionsort for small size groups */
/* Sorts SA[first..last) ascending by ss_compare of the substrings at
   the given depth.  An element that compares equal to its predecessor
   is stored bit-complemented (negative) to mark equal-substring groups;
   negative entries are skipped while shifting. */
static
void
ss_insertionsort(const unsigned char *T, const int *PA,
                 int *first, int *last, int depth) {
  int *i, *j;
  int t;
  int r;
  for(i = last - 2; first <= i; --i) {
    for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
      /* shift smaller elements left, carrying their negative marks along */
      do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
      if(last <= j) { break; }
    }
    if(r == 0) { *j = ~*j; } /* equal to its new successor: mark it */
    *(j - 1) = t;
  }
}
#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
/* Sift SA[i] down into its max-heap position; the heap is ordered by the
   key Td[PA[SA[k]]].  Callers guarantee every node with a left child also
   has a right child (ss_heapsort forces an odd heap size), so SA[j] below
   is always in range. */
static INLINE
void
ss_fixdown(const unsigned char *Td, const int *PA,
           int *SA, int i, int size) {
  int j, k;
  int v;
  int c, d, e;
  for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
    d = Td[PA[SA[k = j++]]];
    if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; } /* pick the larger child */
    if(d <= c) { break; }
  }
  SA[i] = v;
}
/* Simple top-down heapsort. */
/* Sorts SA[0..size) ascending by the key Td[PA[SA[i]]].  An even size is
   reduced to an odd heap (the spare maximum is pre-placed at SA[m]) so
   that ss_fixdown may always read both children. */
static
void
ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
  int i, m;
  int t;
  m = size;
  if((size % 2) == 0) {
    m--;
    if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
  }
  for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); } /* heapify */
  if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
  for(i = m - 1; 0 < i; --i) {
    /* extract max: move root aside, shrink heap, re-sift, store max */
    t = SA[0], SA[0] = SA[i];
    ss_fixdown(Td, PA, SA, 0, i);
    SA[i] = t;
  }
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
static INLINE
int *
ss_median3(const unsigned char *Td, const int *PA,
           int *v1, int *v2, int *v3) {
  /* Candidates are compared by the key Td[PA[*v]]; only the local pointer
     copies are reordered, nothing is written through them. */
  int *t;
  if(Td[PA[*v1]] > Td[PA[*v2]]) { t = v1; v1 = v2; v2 = t; }
  if(Td[PA[*v2]] > Td[PA[*v3]]) {
    return (Td[PA[*v1]] > Td[PA[*v3]]) ? v1 : v3;
  }
  return v2;
}
/* Returns the median of five elements. */
/* Partial sorting network on the keys Td[PA[*v]]; only the local pointer
   copies are permuted.  The comparison order is significant — do not
   reorder. */
static INLINE
int *
ss_median5(const unsigned char *Td, const int *PA,
           int *v1, int *v2, int *v3, int *v4, int *v5) {
  int *t;
  if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
  if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
  if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
  if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
  if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
  if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
  return v3;
}
/* Returns the pivot element. */
/* Pivot selection scaled by range size: median-of-3 for <= 32 elements,
   median-of-5 for <= 512, otherwise a pseudo-median of nine ("ninther"). */
static INLINE
int *
ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
  int *middle;
  int t;
  t = last - first;
  middle = first + t / 2;
  if(t <= 512) {
    if(t <= 32) {
      return ss_median3(Td, PA, first, middle, last - 1);
    } else {
      t >>= 2;
      return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
    }
  }
  t >>= 3;
  first  = ss_median3(Td, PA, first, first + t, first + (t << 1));
  middle = ss_median3(Td, PA, middle - t, middle, middle + t);
  last   = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
  return ss_median3(Td, PA, first, middle, last);
}
/*---------------------------------------------------------------------------*/
/* Binary partition for substrings. */
/* Partitions SA[first..last) so that entries whose substring is exhausted
   at this depth (PA[*a] + depth >= PA[*a + 1] + 1) come first; those
   entries are stored bit-complemented to mark them as finished.  Returns
   the boundary of the two parts. */
static INLINE
int *
ss_partition(const int *PA,
             int *first, int *last, int depth) {
  int *a, *b;
  int t;
  for(a = first - 1, b = last;;) {
    for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
    for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { }
    if(b <= a) { break; }
    t = ~*b; /* exchange, complementing the element moved to the left part */
    *b = *a;
    *a = t;
  }
  if(first < a) { *first = ~*first; }
  return a;
}
/* Multikey introsort for medium size groups. */
/* Ternary-partition quicksort on the character at `depth`, with an explicit
   stack instead of recursion.  `limit` counts down the allowed partition
   depth; when it hits zero the range falls back to heapsort, and a negative
   limit means the range is already sorted on the current character and only
   needs equal-group processing.  Equal groups recurse with depth + 1. */
static
void
ss_mintrosort(const unsigned char *T, const int *PA,
              int *first, int *last,
              int depth) {
#define STACK_SIZE SS_MISORT_STACKSIZE
  struct { int *a, *b, c; int d; } stack[STACK_SIZE];
  const unsigned char *Td;
  int *a, *b, *c, *d, *e, *f;
  int s, t;
  int ssize;
  int limit;
  int v, x = 0;
  for(ssize = 0, limit = ss_ilg(last - first);;) {
    if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
#if 1 < SS_INSERTIONSORT_THRESHOLD
      if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
#endif
      STACK_POP(first, last, depth, limit);
      continue;
    }
    Td = T + depth;
    if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
    if(limit < 0) {
      /* range is sorted on Td; scan for the first equal-run of length > 1 */
      for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
        if((x = Td[PA[*a]]) != v) {
          if(1 < (a - first)) { break; }
          v = x;
          first = a;
        }
      }
      if(Td[PA[*first] - 1] < v) {
        first = ss_partition(PA, first, a, depth);
      }
      /* recurse into the smaller side first to bound stack usage */
      if((a - first) <= (last - a)) {
        if(1 < (a - first)) {
          STACK_PUSH(a, last, depth, -1);
          last = a, depth += 1, limit = ss_ilg(a - first);
        } else {
          first = a, limit = -1;
        }
      } else {
        if(1 < (last - a)) {
          STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
          first = a, limit = -1;
        } else {
          last = a, depth += 1, limit = ss_ilg(a - first);
        }
      }
      continue;
    }
    /* choose pivot */
    a = ss_pivot(Td, PA, first, last);
    v = Td[PA[*a]];
    SWAP(*first, *a);
    /* partition (Bentley-McIlroy ternary scheme: equals collected at both ends) */
    for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
    if(((a = b) < last) && (x < v)) {
      for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
        if(x == v) { SWAP(*b, *a); ++a; }
      }
    }
    for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
    if((b < (d = c)) && (x > v)) {
      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
        if(x == v) { SWAP(*c, *d); --d; }
      }
    }
    for(; b < c;) {
      SWAP(*b, *c);
      for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
        if(x == v) { SWAP(*b, *a); ++a; }
      }
      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
        if(x == v) { SWAP(*c, *d); --d; }
      }
    }
    if(a <= d) {
      /* swap the end-parked equals into the middle */
      c = b - 1;
      if((s = a - first) > (t = b - a)) { s = t; }
      for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
      if((s = d - c) > (t = last - d - 1)) { s = t; }
      for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
      a = first + (b - a), c = last - (d - c);
      b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
      /* push the two larger sub-ranges, keep the smallest for the loop */
      if((a - first) <= (last - c)) {
        if((last - c) <= (c - b)) {
          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
          STACK_PUSH(c, last, depth, limit);
          last = a;
        } else if((a - first) <= (c - b)) {
          STACK_PUSH(c, last, depth, limit);
          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
          last = a;
        } else {
          STACK_PUSH(c, last, depth, limit);
          STACK_PUSH(first, a, depth, limit);
          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
        }
      } else {
        if((a - first) <= (c - b)) {
          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
          STACK_PUSH(first, a, depth, limit);
          first = c;
        } else if((last - c) <= (c - b)) {
          STACK_PUSH(first, a, depth, limit);
          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
          first = c;
        } else {
          STACK_PUSH(first, a, depth, limit);
          STACK_PUSH(c, last, depth, limit);
          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
        }
      }
    } else {
      /* all elements equal to the pivot: go one character deeper */
      limit += 1;
      if(Td[PA[*first] - 1] < v) {
        first = ss_partition(PA, first, last, depth);
        limit = ss_ilg(last - first);
      }
      depth += 1;
    }
  }
#undef STACK_SIZE
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
/*---------------------------------------------------------------------------*/
#if SS_BLOCKSIZE != 0
static INLINE
void
ss_blockswap(int *a, int *b, int n) {
  /* Exchange the n-element blocks starting at a and b. */
  int i, tmp;
  for(i = 0; i < n; ++i) {
    tmp = a[i];
    a[i] = b[i];
    b[i] = tmp;
  }
}
/* In-place left-rotation of [first, last) so that the element at `middle`
   becomes the first element.  Works by repeatedly cycling one element of
   the smaller side through the larger side, shrinking the range until the
   two sides swap cleanly; no temporary buffer beyond one int. */
static INLINE
void
ss_rotate(int *first, int *middle, int *last) {
  int *a, *b, t;
  int l, r;
  l = middle - first, r = last - middle;
  for(; (0 < l) && (0 < r);) {
    if(l == r) { ss_blockswap(first, middle, l); break; }
    if(l < r) {
      /* left side smaller: cycle backward from the end */
      a = last - 1, b = middle - 1;
      t = *a;
      do {
        *a-- = *b, *b-- = *a;
        if(b < first) {
          *a = t;
          last = a;
          if((r -= l + 1) <= l) { break; }
          a -= 1, b = middle - 1;
          t = *a;
        }
      } while(1);
    } else {
      /* right side smaller: cycle forward from the start */
      a = first, b = middle;
      t = *a;
      do {
        *a++ = *b, *b++ = *a;
        if(last <= b) {
          *a = t;
          first = a + 1;
          if((l -= r + 1) <= r) { break; }
          a += 1, b = middle;
          t = *a;
        }
      } while(1);
    }
  }
}
/*---------------------------------------------------------------------------*/
/* Merges the two sorted (by ss_compare at `depth`) ranges [first, middle)
   and [middle, last) in place, using binary search plus rotation.  Negative
   entries are bit-complemented equal-group marks; the marks are preserved
   and re-placed at the new group heads. */
static
void
ss_inplacemerge(const unsigned char *T, const int *PA,
                int *first, int *middle, int *last,
                int depth) {
  const int *p;
  int *a, *b;
  int len, half;
  int q, r;
  int x;
  for(;;) {
    /* take the last element of the right part (decoding its mark, if any) */
    if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
    else                { x = 0; p = PA + *(last - 1); }
    /* binary search for its insertion point in [first, middle) */
    for(a = first, len = middle - first, half = len >> 1, r = -1;
        0 < len;
        len = half, half >>= 1) {
      b = a + half;
      q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
      if(q < 0) {
        a = b + 1;
        half -= (len & 1) ^ 1;
      } else {
        r = q;
      }
    }
    if(a < middle) {
      if(r == 0) { *a = ~*a; } /* ties the element at a: mark it */
      ss_rotate(a, middle, last);
      last -= middle - a;
      middle = a;
      if(first == middle) { break; }
    }
    --last;
    if(x != 0) { while(*--last < 0) { } } /* skip the marked equal run */
    if(middle == last) { break; }
  }
}
/*---------------------------------------------------------------------------*/
/* Merge-forward with internal buffer. */
/* Merges sorted [first, middle) and [middle, last) into [first, ...),
   using buf (size >= middle - first) as scratch: the left part is first
   swapped into buf, then merged back.  Buffer and destination contents are
   exchanged pairwise (*a++ = *b, *b++ = *a) so the scratch area ends up
   holding the displaced destination values; `t` carries the one element
   temporarily overwritten.  Equal pairs mark the right-hand element
   bit-complemented, matching ss_insertionsort's convention. */
static
void
ss_mergeforward(const unsigned char *T, const int *PA,
                int *first, int *middle, int *last,
                int *buf, int depth) {
  int *a, *b, *c, *bufend;
  int t;
  int r;
  bufend = buf + (middle - first) - 1;
  ss_blockswap(buf, first, middle - first);
  for(t = *(a = first), b = buf, c = middle;;) {
    r = ss_compare(T, PA + *b, PA + *c, depth);
    if(r < 0) {
      do {
        *a++ = *b;
        if(bufend <= b) { *bufend = t; return; }
        *b++ = *a;
      } while(*b < 0);
    } else if(r > 0) {
      do {
        *a++ = *c, *c++ = *a;
        if(last <= c) {
          /* right part exhausted: drain the buffer */
          while(b < bufend) { *a++ = *b, *b++ = *a; }
          *a = *b, *b = t;
          return;
        }
      } while(*c < 0);
    } else {
      *c = ~*c; /* equal substrings: mark the right-hand element */
      do {
        *a++ = *b;
        if(bufend <= b) { *bufend = t; return; }
        *b++ = *a;
      } while(*b < 0);
      do {
        *a++ = *c, *c++ = *a;
        if(last <= c) {
          while(b < bufend) { *a++ = *b, *b++ = *a; }
          *a = *b, *b = t;
          return;
        }
      } while(*c < 0);
    }
  }
}
/* Merge-backward with internal buffer. */
/* Mirror of ss_mergeforward: the right part [middle, last) is swapped into
   buf and the merge proceeds from the high end downward.  Bits of `x`
   remember whether the current buffer head (bit 1) or left head (bit 2)
   carries an equal-group mark, so the mark's run can be copied intact. */
static
void
ss_mergebackward(const unsigned char *T, const int *PA,
                 int *first, int *middle, int *last,
                 int *buf, int depth) {
  const int *p1, *p2;
  int *a, *b, *c, *bufend;
  int t;
  int r;
  int x;
  bufend = buf + (last - middle) - 1;
  ss_blockswap(buf, middle, last - middle);
  x = 0;
  if(*bufend < 0)       { p1 = PA + ~*bufend; x |= 1; }
  else                  { p1 = PA + *bufend; }
  if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
  else                  { p2 = PA + *(middle - 1); }
  for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
    r = ss_compare(T, p1, p2, depth);
    if(0 < r) {
      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
      *a-- = *b;
      if(b <= buf) { *buf = t; break; }
      *b-- = *a;
      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
      else       { p1 = PA + *b; }
    } else if(r < 0) {
      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
      *a-- = *c, *c-- = *a;
      if(c < first) {
        /* left part exhausted: drain the buffer */
        while(buf < b) { *a-- = *b, *b-- = *a; }
        *a = *b, *b = t;
        break;
      }
      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
      else       { p2 = PA + *c; }
    } else {
      /* equal: emit the buffer element complemented, then the left one */
      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
      *a-- = ~*b;
      if(b <= buf) { *buf = t; break; }
      *b-- = *a;
      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
      *a-- = *c, *c-- = *a;
      if(c < first) {
        while(buf < b) { *a-- = *b, *b-- = *a; }
        *a = *b, *b = t;
        break;
      }
      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
      else       { p1 = PA + *b; }
      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
      else       { p2 = PA + *c; }
    }
  }
}
/* D&C based merge. */
/* Merges sorted [first, middle) and [middle, last) using a buffer of
   bufsize ints.  Halves small enough for the buffer go straight to
   ss_mergeforward/ss_mergebackward; otherwise the range is split by a
   symmetric binary search around `middle` and block-swapped, recursing via
   an explicit stack.  The bit flags in check/next record which boundary
   elements may need their equal-group mark restored (see MERGE_CHECK:
   bit 1 = force-mark first, bit 2 = compare first with its predecessor,
   bit 4 = compare last with its predecessor). */
static
void
ss_swapmerge(const unsigned char *T, const int *PA,
             int *first, int *middle, int *last,
             int *buf, int bufsize, int depth) {
#define STACK_SIZE SS_SMERGE_STACKSIZE
#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
#define MERGE_CHECK(a, b, c)\
  do {\
    if(((c) & 1) ||\
       (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
      *(a) = ~*(a);\
    }\
    if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
      *(b) = ~*(b);\
    }\
  } while(0)
  struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
  int *l, *r, *lm, *rm;
  int m, len, half;
  int ssize;
  int check, next;
  for(check = 0, ssize = 0;;) {
    if((last - middle) <= bufsize) {
      if((first < middle) && (middle < last)) {
        ss_mergebackward(T, PA, first, middle, last, buf, depth);
      }
      MERGE_CHECK(first, last, check);
      STACK_POP(first, middle, last, check);
      continue;
    }
    if((middle - first) <= bufsize) {
      if(first < middle) {
        ss_mergeforward(T, PA, first, middle, last, buf, depth);
      }
      MERGE_CHECK(first, last, check);
      STACK_POP(first, middle, last, check);
      continue;
    }
    /* find the largest m with SA[middle - m .. middle) > SA[middle .. middle + m) */
    for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
        0 < len;
        len = half, half >>= 1) {
      if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
                       PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
        m += half + 1;
        half -= (len & 1) ^ 1;
      }
    }
    if(0 < m) {
      /* swap the crossing blocks and recurse into both halves */
      lm = middle - m, rm = middle + m;
      ss_blockswap(lm, middle, m);
      l = r = middle, next = 0;
      if(rm < last) {
        if(*rm < 0) {
          *rm = ~*rm;
          if(first < lm) { for(; *--l < 0;) { } next |= 4; }
          next |= 1;
        } else if(first < lm) {
          for(; *r < 0; ++r) { }
          next |= 2;
        }
      }
      /* recurse into the smaller side first to bound stack usage */
      if((l - first) <= (last - r)) {
        STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
        middle = lm, last = l, check = (check & 3) | (next & 4);
      } else {
        if((next & 2) && (r == middle)) { next ^= 6; }
        STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
        first = r, middle = rm, check = (next & 3) | (check & 4);
      }
    } else {
      if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
        *middle = ~*middle;
      }
      MERGE_CHECK(first, last, check);
      STACK_POP(first, middle, last, check);
    }
  }
#undef STACK_SIZE
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Substring sort */
/* Sorts the type B* substrings SA[first..last) by ss_compare at `depth`,
   using PA as the position array and [buf, buf + bufsize) as merge scratch.
   With SS_BLOCKSIZE != 0 the range is cut into SS_BLOCKSIZE blocks, each
   sorted independently, then merged pairwise binomially (driven by the
   bits of the block counter i); a tail reserved at the end of SA can serve
   as the merge buffer when the caller's buf is too small.  If lastsuffix
   is set, the final type B* suffix is excluded from the sort and inserted
   afterwards by linear search. */
static
void
sssort(const unsigned char *T, const int *PA,
       int *first, int *last,
       int *buf, int bufsize,
       int depth, int n, int lastsuffix) {
  int *a;
#if SS_BLOCKSIZE != 0
  int *b, *middle, *curbuf;
  int j, k, curbufsize, limit;
#endif
  int i;
  if(lastsuffix != 0) { ++first; }
#if SS_BLOCKSIZE == 0
  ss_mintrosort(T, PA, first, last, depth);
#else
  if((bufsize < SS_BLOCKSIZE) &&
      (bufsize < (last - first)) &&
      (bufsize < (limit = ss_isqrt(last - first)))) {
    /* caller's buffer too small: borrow sqrt(range) elements from the tail */
    if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
    buf = middle = last - limit, bufsize = limit;
  } else {
    middle = last, limit = 0;
  }
  for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
    ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
#elif 1 < SS_BLOCKSIZE
    ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
#endif
    curbufsize = last - (a + SS_BLOCKSIZE);
    curbuf = a + SS_BLOCKSIZE;
    if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
    /* binomial merge: fold completed blocks while the low bits of i are set */
    for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
      ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
    }
  }
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
  ss_mintrosort(T, PA, a, middle, depth);
#elif 1 < SS_BLOCKSIZE
  ss_insertionsort(T, PA, a, middle, depth);
#endif
  /* merge the remaining block chain */
  for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
    if(i & 1) {
      ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
      a -= k;
    }
  }
  if(limit != 0) {
    /* the borrowed tail was excluded: sort it and merge it back in place */
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
    ss_mintrosort(T, PA, middle, last, depth);
#elif 1 < SS_BLOCKSIZE
    ss_insertionsort(T, PA, middle, last, depth);
#endif
    ss_inplacemerge(T, PA, first, middle, last, depth);
  }
#endif
  if(lastsuffix != 0) {
    /* Insert last type B* suffix. */
    int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
    for(a = first, i = *(first - 1);
        (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
        ++a) {
      *(a - 1) = *a;
    }
    *(a - 1) = i;
  }
}
/*---------------------------------------------------------------------------*/
static INLINE
int
tr_ilg(int n) {
  /* floor(log2(n)) for a full 32-bit positive n, via byte-wide lg_table
     lookups on the highest non-zero byte. */
  if(n & 0xffff0000) {
    if(n & 0xff000000) { return 24 + lg_table[(n >> 24) & 0xff]; }
    return 16 + lg_table[(n >> 16) & 0xff];
  }
  if(n & 0x0000ff00) { return 8 + lg_table[(n >> 8) & 0xff]; }
  return lg_table[n & 0xff];
}
/*---------------------------------------------------------------------------*/
/* Simple insertionsort for small size groups. */
static
void
tr_insertionsort(const int *ISAd, int *first, int *last) {
  /* Sorts SA[first..last) ascending by the rank ISAd[value].  An element
     whose rank ties its new successor is stored bit-complemented
     (negative) to mark equal-rank groups; negative entries are skipped
     while shifting. */
  int *cur, *prev;
  int val, diff;
  for(cur = first + 1; cur < last; ++cur) {
    val = *cur;
    prev = cur - 1;
    while(0 > (diff = ISAd[val] - ISAd[*prev])) {
      do { *(prev + 1) = *prev; } while((first <= --prev) && (*prev < 0));
      if(prev < first) { break; }
    }
    if(diff == 0) { *prev = ~*prev; }
    *(prev + 1) = val;
  }
}
/*---------------------------------------------------------------------------*/
/* Sift SA[i] down into its max-heap position; the heap is ordered by the
   rank ISAd[SA[k]].  As with ss_fixdown, callers use an odd heap size so
   both children are always readable. */
static INLINE
void
tr_fixdown(const int *ISAd, int *SA, int i, int size) {
  int j, k;
  int v;
  int c, d, e;
  for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
    d = ISAd[SA[k = j++]];
    if(d < (e = ISAd[SA[j]])) { k = j; d = e; } /* pick the larger child */
    if(d <= c) { break; }
  }
  SA[i] = v;
}
/* Simple top-down heapsort. */
/* Sorts SA[0..size) ascending by the rank ISAd[SA[i]]; even sizes are
   reduced to an odd heap with the spare maximum pre-placed at SA[m]. */
static
void
tr_heapsort(const int *ISAd, int *SA, int size) {
  int i, m;
  int t;
  m = size;
  if((size % 2) == 0) {
    m--;
    if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
  }
  for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); } /* heapify */
  if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
  for(i = m - 1; 0 < i; --i) {
    t = SA[0], SA[0] = SA[i];
    tr_fixdown(ISAd, SA, 0, i);
    SA[i] = t;
  }
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
static INLINE
int *
tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
  /* Candidates are compared by the rank ISAd[*v]; only the local pointer
     copies are reordered, nothing is written through them. */
  int *t;
  if(ISAd[*v1] > ISAd[*v2]) { t = v1; v1 = v2; v2 = t; }
  if(ISAd[*v2] > ISAd[*v3]) {
    return (ISAd[*v1] > ISAd[*v3]) ? v1 : v3;
  }
  return v2;
}
/* Returns the median of five elements. */
/* Partial sorting network on the ranks ISAd[*v]; only the local pointer
   copies are permuted.  The comparison order is significant — do not
   reorder. */
static INLINE
int *
tr_median5(const int *ISAd,
           int *v1, int *v2, int *v3, int *v4, int *v5) {
  int *t;
  if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
  if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
  if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
  if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
  if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
  if(ISAd[*v3] > ISAd[*v4]) { return v4; }
  return v3;
}
/* Returns the pivot element. */
/* Pivot selection scaled by range size: median-of-3 for <= 32 elements,
   median-of-5 for <= 512, otherwise a pseudo-median of nine ("ninther"). */
static INLINE
int *
tr_pivot(const int *ISAd, int *first, int *last) {
  int *middle;
  int t;
  t = last - first;
  middle = first + t / 2;
  if(t <= 512) {
    if(t <= 32) {
      return tr_median3(ISAd, first, middle, last - 1);
    } else {
      t >>= 2;
      return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
    }
  }
  t >>= 3;
  first  = tr_median3(ISAd, first, first + t, first + (t << 1));
  middle = tr_median3(ISAd, middle - t, middle, middle + t);
  last   = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
  return tr_median3(ISAd, first, middle, last);
}
/*---------------------------------------------------------------------------*/
/* Work budget limiting the effort tr_introsort spends on one group. */
typedef struct _trbudget_t trbudget_t;
struct _trbudget_t {
  int chance; /* number of budget refills still allowed */
  int remain; /* work remaining before the next refill */
  int incval; /* amount restored on each refill */
  int count;  /* work requested after the budget ran out (reset by caller) */
};
/* Initializes a work budget: `chance` refills of `incval` units each.
   Also zeroes `count`; callers (trsort) reset it before each use, but
   initializing it here avoids ever reading an indeterminate value if a
   future caller forgets. */
static INLINE
void
trbudget_init(trbudget_t *budget, int chance, int incval) {
  budget->chance = chance;
  budget->remain = budget->incval = incval;
  budget->count = 0;
}
/* Charges `size` units against the budget.  Returns 1 while work is still
   allowed (consuming a refill "chance" when the remainder runs out); once
   all chances are spent, accumulates the refused work in budget->count and
   returns 0 so the caller can defer the group to a later pass. */
static INLINE
int
trbudget_check(trbudget_t *budget, int size) {
  if(size <= budget->remain) { budget->remain -= size; return 1; }
  if(budget->chance == 0) { budget->count += size; return 0; }
  budget->remain += budget->incval - size;
  budget->chance -= 1;
  return 1;
}
/*---------------------------------------------------------------------------*/
/* Ternary (Bentley-McIlroy) partition of SA[first..last) around rank v,
   scanning from `middle`.  Equal elements are parked at both ends during
   the scan and swapped into the center afterwards.  On return, *pa/*pb
   bound the equal-to-v region: [first, *pa) < v, [*pa, *pb) == v,
   [*pb, last) > v. */
static INLINE
void
tr_partition(const int *ISAd,
             int *first, int *middle, int *last,
             int **pa, int **pb, int v) {
  int *a, *b, *c, *d, *e, *f;
  int t, s;
  int x = 0;
  for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
  if(((a = b) < last) && (x < v)) {
    for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
      if(x == v) { SWAP(*b, *a); ++a; }
    }
  }
  for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
  if((b < (d = c)) && (x > v)) {
    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
      if(x == v) { SWAP(*c, *d); --d; }
    }
  }
  for(; b < c;) {
    SWAP(*b, *c);
    for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
      if(x == v) { SWAP(*b, *a); ++a; }
    }
    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
      if(x == v) { SWAP(*c, *d); --d; }
    }
  }
  if(a <= d) {
    /* swap the end-parked equals into the middle */
    c = b - 1;
    if((s = a - first) > (t = b - a)) { s = t; }
    for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
    if((s = d - c) > (t = last - d - 1)) { s = t; }
    for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
    first += (b - a), last -= (d - c);
  }
  *pa = first, *pb = last;
}
/* Derives the order of a tandem-repeat group from its sorted neighbors:
   for each sorted suffix s + depth, the suffix s belongs to the unsorted
   group [a, b) iff ISA[s] == v; scanning the left part forward and the
   right part backward places each such s and updates its rank in ISA. */
static
void
tr_copy(int *ISA, const int *SA,
        int *first, int *a, int *b, int *last,
        int depth) {
  /* sort suffixes of middle partition
     by using sorted order of suffixes of left and right partition. */
  int *c, *d, *e;
  int s, v;
  v = b - SA - 1; /* rank shared by every member of the unsorted group */
  for(c = first, d = a - 1; c <= d; ++c) {
    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
      *++d = s;
      ISA[s] = d - SA;
    }
  }
  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
      *--d = s;
      ISA[s] = d - SA;
    }
  }
}
/* Like tr_copy, but used when the budget ran out and the group stays only
   partially sorted: instead of giving each placed suffix its own rank,
   suffixes whose source rank (ISA[s + depth]) ties share one new rank, so
   ties are preserved for a later pass.  The middle loop re-ranks the
   already-placed left part the same way. */
static
void
tr_partialcopy(int *ISA, const int *SA,
               int *first, int *a, int *b, int *last,
               int depth) {
  int *c, *d, *e;
  int s, v;
  int rank, lastrank, newrank = -1;
  v = b - SA - 1; /* rank shared by every member of the unsorted group */
  lastrank = -1;
  for(c = first, d = a - 1; c <= d; ++c) {
    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
      *++d = s;
      rank = ISA[s + depth];
      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
      ISA[s] = newrank;
    }
  }
  lastrank = -1;
  for(e = d; first <= e; --e) {
    rank = ISA[*e];
    if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
    if(newrank != rank) { ISA[*e] = newrank; }
  }
  lastrank = -1;
  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
      *--d = s;
      rank = ISA[s + depth];
      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
      ISA[s] = newrank;
    }
  }
}
/* Introsort over suffix ranks for one doubling pass (Larsson-Sadakane
   style).  Sorts SA[first..last) by the rank ISAd[suffix], recursing on
   equal-rank groups at double the comparison offset (ISAd += incr).  An
   explicit 5-field stack replaces recursion; `trlink` threads the stack
   frames belonging to tandem-repeat work so a budget overrun can flag them
   (d = -1) for tr_partialcopy.  Sentinel values of `limit`:
     >= 0 : remaining partition depth before falling back to heapsort,
     -1   : tandem repeat partition,
     -2   : tandem repeat copy (tr_copy / tr_partialcopy),
     -3   : post-processing of a sorted partition (rank updates). */
static
void
tr_introsort(int *ISA, const int *ISAd,
             int *SA, int *first, int *last,
             trbudget_t *budget) {
#define STACK_SIZE TR_STACKSIZE
  struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];
  int *a, *b, *c;
  int t;
  int v, x = 0;
  int incr = ISAd - ISA; /* current comparison offset (doubles each level) */
  int limit, next;
  int ssize, trlink = -1;
  for(ssize = 0, limit = tr_ilg(last - first);;) {
    if(limit < 0) {
      if(limit == -1) {
        /* tandem repeat partition */
        tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
        /* update ranks */
        if(a < last) {
          for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
        }
        if(b < last) {
          for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
        }
        /* push */
        if(1 < (b - a)) {
          STACK_PUSH5(NULL, a, b, 0, 0);
          STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
          trlink = ssize - 2;
        }
        if((a - first) <= (last - b)) {
          if(1 < (a - first)) {
            STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
            last = a, limit = tr_ilg(a - first);
          } else if(1 < (last - b)) {
            first = b, limit = tr_ilg(last - b);
          } else {
            STACK_POP5(ISAd, first, last, limit, trlink);
          }
        } else {
          if(1 < (last - b)) {
            STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
            first = b, limit = tr_ilg(last - b);
          } else if(1 < (a - first)) {
            last = a, limit = tr_ilg(a - first);
          } else {
            STACK_POP5(ISAd, first, last, limit, trlink);
          }
        }
      } else if(limit == -2) {
        /* tandem repeat copy */
        a = stack[--ssize].b, b = stack[ssize].c;
        if(stack[ssize].d == 0) {
          tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
        } else {
          /* budget overran somewhere below: ranks are only partial */
          if(0 <= trlink) { stack[trlink].d = -1; }
          tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
        }
        STACK_POP5(ISAd, first, last, limit, trlink);
      } else {
        /* sorted partition */
        if(0 <= *first) {
          /* give singleton entries their final rank */
          a = first;
          do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
          first = a;
        }
        if(first < last) {
          /* un-complement the next equal-rank group and recurse deeper */
          a = first; do { *a = ~*a; } while(*++a < 0);
          next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
          if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
          /* push */
          if(trbudget_check(budget, a - first)) {
            if((a - first) <= (last - a)) {
              STACK_PUSH5(ISAd, a, last, -3, trlink);
              ISAd += incr, last = a, limit = next;
            } else {
              if(1 < (last - a)) {
                STACK_PUSH5(ISAd + incr, first, a, next, trlink);
                first = a, limit = -3;
              } else {
                ISAd += incr, last = a, limit = next;
              }
            }
          } else {
            if(0 <= trlink) { stack[trlink].d = -1; }
            if(1 < (last - a)) {
              first = a, limit = -3;
            } else {
              STACK_POP5(ISAd, first, last, limit, trlink);
            }
          }
        } else {
          STACK_POP5(ISAd, first, last, limit, trlink);
        }
      }
      continue;
    }
    if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
      tr_insertionsort(ISAd, first, last);
      limit = -3;
      continue;
    }
    if(limit-- == 0) {
      /* partition depth exhausted: heapsort, then mark equal runs */
      tr_heapsort(ISAd, first, last - first);
      for(a = last - 1; first < a; a = b) {
        for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
      }
      limit = -3;
      continue;
    }
    /* choose pivot */
    a = tr_pivot(ISAd, first, last);
    SWAP(*first, *a);
    v = ISAd[*first];
    /* partition */
    tr_partition(ISAd, first, first + 1, last, &a, &b, v);
    if((last - first) != (b - a)) {
      next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
      /* update ranks */
      for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
      if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
      /* push the two larger sub-ranges, keep the smallest for the loop */
      if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
        if((a - first) <= (last - b)) {
          if((last - b) <= (b - a)) {
            if(1 < (a - first)) {
              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
              STACK_PUSH5(ISAd, b, last, limit, trlink);
              last = a;
            } else if(1 < (last - b)) {
              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
              first = b;
            } else {
              ISAd += incr, first = a, last = b, limit = next;
            }
          } else if((a - first) <= (b - a)) {
            if(1 < (a - first)) {
              STACK_PUSH5(ISAd, b, last, limit, trlink);
              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
              last = a;
            } else {
              STACK_PUSH5(ISAd, b, last, limit, trlink);
              ISAd += incr, first = a, last = b, limit = next;
            }
          } else {
            STACK_PUSH5(ISAd, b, last, limit, trlink);
            STACK_PUSH5(ISAd, first, a, limit, trlink);
            ISAd += incr, first = a, last = b, limit = next;
          }
        } else {
          if((a - first) <= (b - a)) {
            if(1 < (last - b)) {
              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
              STACK_PUSH5(ISAd, first, a, limit, trlink);
              first = b;
            } else if(1 < (a - first)) {
              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
              last = a;
            } else {
              ISAd += incr, first = a, last = b, limit = next;
            }
          } else if((last - b) <= (b - a)) {
            if(1 < (last - b)) {
              STACK_PUSH5(ISAd, first, a, limit, trlink);
              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
              first = b;
            } else {
              STACK_PUSH5(ISAd, first, a, limit, trlink);
              ISAd += incr, first = a, last = b, limit = next;
            }
          } else {
            STACK_PUSH5(ISAd, first, a, limit, trlink);
            STACK_PUSH5(ISAd, b, last, limit, trlink);
            ISAd += incr, first = a, last = b, limit = next;
          }
        }
      } else {
        /* equal-group too small or budget refused: skip the deeper sort */
        if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
        if((a - first) <= (last - b)) {
          if(1 < (a - first)) {
            STACK_PUSH5(ISAd, b, last, limit, trlink);
            last = a;
          } else if(1 < (last - b)) {
            first = b;
          } else {
            STACK_POP5(ISAd, first, last, limit, trlink);
          }
        } else {
          if(1 < (last - b)) {
            STACK_PUSH5(ISAd, first, a, limit, trlink);
            first = b;
          } else if(1 < (a - first)) {
            last = a;
          } else {
            STACK_POP5(ISAd, first, last, limit, trlink);
          }
        }
      }
    } else {
      /* whole range equal on this rank: double the offset and continue */
      if(trbudget_check(budget, last - first)) {
        limit = tr_ilg(last - first), ISAd += incr;
      } else {
        if(0 <= trlink) { stack[trlink].d = -1; }
        STACK_POP5(ISAd, first, last, limit, trlink);
      }
    }
  }
#undef STACK_SIZE
}
/*---------------------------------------------------------------------------*/
/* Tandem repeat sort */
/* Completes the suffix order by repeated doubling: each outer pass sorts
   groups by the rank `depth` positions ahead (ISAd), then doubles the
   offset (ISAd += ISAd - ISA).  In SA, a negative entry -k encodes a run
   of k already-sorted suffixes to skip; adjacent finished runs are
   coalesced via `skip`.  *SA stays > -n while any group remains, and the
   loop also stops early when a pass finishes every group within budget
   (unsorted == 0). */
static
void
trsort(int *ISA, int *SA, int n, int depth) {
  int *ISAd;
  int *first, *last;
  trbudget_t budget;
  int t, skip, unsorted;
  trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
/*  trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
  for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
    first = SA;
    skip = 0;
    unsorted = 0;
    do {
      if((t = *first) < 0) { first -= t; skip += t; } /* skip a sorted run */
      else {
        if(skip != 0) { *(first + skip) = skip; skip = 0; }
        last = SA + ISA[t] + 1; /* group ends at its shared rank */
        if(1 < (last - first)) {
          budget.count = 0;
          tr_introsort(ISA, ISAd, SA, first, last, &budget);
          if(budget.count != 0) { unsorted += budget.count; }
          else { skip = first - last; } /* group fully sorted: mark run */
        } else if((last - first) == 1) {
          skip = -1;
        }
        first = last;
      }
    } while(first < (SA + n));
    if(skip != 0) { *(first + skip) = skip; }
    if(unsorted == 0) { break; }
  }
}
/*---------------------------------------------------------------------------*/
/* Sorts suffixes of type B*. */
/* Sorts all suffixes of type B* (the boundary suffixes between descending
   and ascending character runs) and fills the bucket_A / bucket_B count
   tables. Returns m, the number of type B* suffixes. On exit SA[0..m-1]
   holds the sorted B* suffix positions and the bucket tables hold the
   start/end indices later used by construct_SA / construct_BWT. */
static
int
sort_typeBstar(const unsigned char *T, int *SA,
               int *bucket_A, int *bucket_B,
               int n) {
  int *PAb, *ISAb, *buf;
#ifdef _OPENMP
  int *curbuf;
  int l;
#endif
  int i, j, k, t, m, bufsize;
  int c0, c1;
#ifdef _OPENMP
  int d0, d1;
  int tmp;
#endif
  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
  /* Count the number of occurrences of the first one or two characters of each
     type A, B and B* suffix. Moreover, store the beginning position of all
     type B* suffixes into the array SA. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;
  /*
  note:
    A type B* suffix is lexicographically smaller than a type B suffix that
    begins with the same first two characters.
  */
  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }
  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters. */
    PAb = SA + n - m; ISAb = SA + m;
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    /* Parallel variant: threads pull (c0, c1) buckets off a shared cursor
       inside the critical section, each using its own slice of the
       scratch buffer. */
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;
      k = 0;
      for(;;) {
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l,
               curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    buf = SA + m, bufsize = n - (2 * m);
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j,
                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif
    /* Compute ranks of type B* substrings. */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }
    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    trsort(ISAb, SA, m, 1);
    /* Set the sorted order of type B* suffixes. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }
    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */
        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1);
            j <= k;
            --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }
  return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
/* Constructs the suffix array by using the sorted order of type B* suffixes.
   First induces the type B suffixes (right-to-left scan per character),
   then the type A suffixes (left-to-right scan). Negative SA values are
   temporary "already placed" markers, un-negated as the scans pass them. */
static
void
construct_SA(const unsigned char *T, int *SA,
             int *bucket_A, int *bucket_B,
             int n, int m) {
  int *i, *j, *k;
  int s;
  int c0, c1, c2;
  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;  /* mark as consumed */
          c0 = T[--s];
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }  /* predecessor is type A */
          if(c0 != c2) {
            /* switched to a new bucket: persist the old write cursor */
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }
  /* Construct the suffix array by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }  /* predecessor is type B */
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else {
      assert(s < 0);
      *i = ~s;  /* restore the real index */
    }
  }
}
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes. */
/* Constructs the Burrows-Wheeler transformed string directly
   by using the sorted order of type B* suffixes. Same induced-sorting
   scans as construct_SA, but each slot is overwritten with the BWT
   character (T[s-1]) instead of the suffix index. Returns the primary
   index (position of the original string's rotation). */
static
int
construct_BWT(const unsigned char *T, int *SA,
              int *bucket_A, int *bucket_B,
              int n, int m) {
  int *i, *j, *k, *orig;
  int s;
  int c0, c1, c2;
  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          c0 = T[--s];
          *j = ~((int)c0);  /* store BWT character (complemented) */
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
        /* In debug builds the s == 0 case is asserted; in release builds
           the branch is compiled out entirely. */
#ifndef NDEBUG
        } else {
          assert(T[s] == c1);
#endif
        }
      }
    }
  }
  /* Construct the BWTed string by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;  /* emit BWT character */
      if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;
    } else {
      orig = i;  /* s == 0: this slot is the primary index */
    }
  }
  return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
/* Computes the suffix array of T[0..n-1] into SA[0..n-1].
   Returns 0 on success, -1 on invalid arguments, -2 on allocation failure. */
int
divsufsort(const unsigned char *T, int *SA, int n) {
  int *bktA;
  int *bktB;
  int result = 0;

  /* Argument validation and trivially small inputs. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  if(n == 0) { return 0; }
  if(n == 1) {
    SA[0] = 0;
    return 0;
  }
  if(n == 2) {
    int smaller = (T[0] < T[1]);
    SA[smaller ^ 1] = 0;
    SA[smaller] = 1;
    return 0;
  }

  bktA = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
  bktB = (int *)malloc(BUCKET_B_SIZE * sizeof(int));

  /* Suffixsort: sort the B* suffixes, then induce the rest. */
  if((bktA != NULL) && (bktB != NULL)) {
    int m = sort_typeBstar(T, SA, bktA, bktB, n);
    construct_SA(T, SA, bktA, bktB, n, m);
  } else {
    result = -2;
  }

  free(bktB);
  free(bktA);
  return result;
}
/* Burrows-Wheeler transform of T[0..n-1] into U. A is an optional caller
   workspace of n+1 ints; when NULL a buffer is allocated internally.
   Returns primary index + 1 on success, n for n <= 1, -1 on invalid
   arguments, -2 on allocation failure. */
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n) {
  int *work;
  int *bktA;
  int *bktB;
  int pidx;

  /* Argument validation and trivially small inputs. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  if(n <= 1) {
    if(n == 1) { U[0] = T[0]; }
    return n;
  }

  work = A;
  if(work == NULL) { work = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
  bktA = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
  bktB = (int *)malloc(BUCKET_B_SIZE * sizeof(int));

  /* Burrows-Wheeler Transform. */
  if((work != NULL) && (bktA != NULL) && (bktB != NULL)) {
    int i;
    int m = sort_typeBstar(T, work, bktA, bktB, n);
    pidx = construct_BWT(T, work, bktA, bktB, n, m);
    /* Copy to the output string, skipping over the primary index slot. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)work[i]; }
    for(i += 1; i < n; ++i) { U[i] = (unsigned char)work[i]; }
    pidx += 1;
  } else {
    pidx = -2;
  }

  free(bktB);
  free(bktA);
  if(A == NULL) { free(work); }  /* only free what we allocated ourselves */
  return pidx;
}
|
5125.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Array initialization: x gets multiples of pi, A gets the deterministic
   pattern i*(j+1)/nx so runs are reproducible. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int row, col;

  for (row = 0; row < ny; row++) {
    x[row] = row * M_PI;
  }
  for (row = 0; row < nx; row++) {
    for (col = 0; col < ny; col++) {
      A[row][col] = ((DATA_TYPE) row * (col + 1)) / nx;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int idx;

  for (idx = 0; idx < nx; idx++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, y[idx]);
    if (idx % 20 == 0) {
      fprintf (stderr, "\n");
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* ATAX kernel: y = A^T (A x), computed as tmp = A x then y += A^T tmp.
   The whole function is timed, including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;

#pragma scop
  #pragma omp parallel num_threads(2)
  {
    /* Zero y first; the implicit barrier at the end of this worksharing
       loop guarantees initialization is complete before the updates. */
    #pragma omp for schedule(dynamic, 8)
    for (i = 0; i < _PB_NY; i++)
      y[i] = 0;

    #pragma omp for private (j) schedule(dynamic, 8)
    for (i = 0; i < _PB_NX; i++)
    {
      /* tmp[i] is written only by the thread owning iteration i: no race. */
      tmp[i] = 0;
      for (j = 0; j < _PB_NY; j++)
        tmp[i] = tmp[i] + A[i][j] * x[j];
      for (j = 0; j < _PB_NY; j++)
      {
        /* BUG FIX: y[j] is updated by every iteration of the parallel
           i-loop; without synchronization two threads can interleave the
           read-modify-write and lose updates (a data race). Make the
           update atomic so the accumulation is correct. */
        #pragma omp atomic
        y[j] = y[j] + A[i][j] * tmp[i];
      }
    }
  }
#pragma endscop
}
/* Benchmark driver: allocates the arrays, initializes them, times the
   ATAX kernel, prints instrumentation, and dumps y to defeat dead-code
   elimination. The PolyBench macro calls are order-dependent. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int nx = NX;
  int ny = NY;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);

  /* Initialize array(s). */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_atax (nx, ny,
               POLYBENCH_ARRAY(A),
               POLYBENCH_ARRAY(x),
               POLYBENCH_ARRAY(y),
               POLYBENCH_ARRAY(tmp));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);

  return 0;
}
|
mixed_tentusscher_myo_epi_2004_S2_9.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_9.h"
/* Reports the cell-model metadata requested by the caller: the resting
   membrane potential (INITIAL_V) and/or the number of ODE state
   variables (NEQ). Flags select which fields to fill in. */
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
/* Loads the initial state vector for one cell, choosing between the
   myocardium (mapping[sv_id] == 0) and epicardium steady-state values.
   The mapping array must be supplied via extra_data; aborts otherwise. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array (0 = myocardium, otherwise epicardium)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V;     // V;       millivolt
        sv[1] = 0.f;           //M
        sv[2] = 0.75;          //H
        sv[3] = 0.75f;         //J
        sv[4] = 0.f;           //Xr1
        sv[5] = 1.f;           //Xr2
        sv[6] = 0.f;           //Xs
        sv[7] = 1.f;           //S
        sv[8] = 0.f;           //R
        sv[9] = 0.f;           //D
        sv[10] = 1.f;          //F
        sv[11] = 1.f;          //FCa
        sv[12] = 1.f;          //G
        sv[13] = 0.0002;       //Cai
        sv[14] = 0.2f;         //CaSR
        sv[15] = 11.6f;        //Nai
        sv[16] = 138.3f;       //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5928834149027,0.00128330373181304,0.780307779232330,0.780055018733705,0.000174145408126877,0.485346186618098,0.00293516207326794,0.999998356983063,1.92561090443674e-08,1.88487092529666e-05,0.999772824420775,1.00713739870886,0.999995945796599,4.41779013989042e-05,0.492864370358447,10.0629845292030,139.540308692868};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}
/* Advances num_steps explicit time steps of size dt for every cell in
   this batch (parallelized over cells with OpenMP), dispatching each
   cell to the myocardium or epicardium right-hand side according to the
   mapping array carried in extra_data. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array (0 = myocardium, otherwise epicardium)
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        // sv_id is the index of this cell's state vector in sv
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // NOTE(review): the model type is selected with mapping[i], while
            // set_model_initial_conditions_cpu indexes the same array with
            // sv_id. These differ whenever cells_to_solve is non-NULL —
            // confirm which index is intended.
            if (mapping[i] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
/* Advances one myocardium cell by a single time step of size dt:
   copies the state, evaluates the RHS (which returns updated states),
   and writes the result back into sv. */
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state[NEQ];
    real next[NEQ];
    int k;

    for(k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_myo(state, next, stim_current, dt);

    for(k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
/* Right-hand side of the ten Tusscher 2004 myocardium cell model.
   Reads the 17 state variables from sv, applies one dt-sized update
   (exponential integration for the gates, forward Euler for voltage
   and concentrations) and stores the UPDATED STATES (not derivatives)
   into rDY_. stim_current is the external stimulus for this step. */
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane potential (per model, millivolt)
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (reversal potentials from Nernst equation)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // analytic solution of the buffered-Ca quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential Rush-Larsen integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only relax while depolarized (per model spec)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
/* Advances one epicardium cell by a single time step of size dt:
   copies the state, evaluates the RHS (which returns updated states),
   and writes the result back into sv. */
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state[NEQ];
    real next[NEQ];
    int k;

    for(k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_epi(state, next, stim_current, dt);

    for(k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
/* Right-hand side of the ten Tusscher 2004 epicardium cell model.
   Identical structure to RHS_cpu_myo, but several conductances and the
   Irel/Ileak parameters are overridden from a fitted parameter table
   (Elnaz's scenario S2). Stores UPDATED STATES (not derivatives) in rDY_. */
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];   // membrane potential
    real sm   = sv[1];
    real sh   = sv[2];
    real sj   = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs  = sv[6];
    real ss   = sv[7];
    real sr   = sv[8];
    real sd   = sv[9];
    real sf   = sv[10];
    real sfca = sv[11];
    real sg   = sv[12];
    real Cai  = sv[13];
    real CaSR = sv[14];
    real Nai  = sv[15];
    real Ki   = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted parameter set overriding the defaults above (scenario-specific)
    real parameters []={14.2678658188600,0.000185186617039757,0.000132124407111086,0.000515886761168309,0.250188569257203,0.153314251022838,0.158501989253313,4.69616330756314,0.0144678840085242,1.89285514296658,1089.26406046390,0.000356678402399680,0.279508931563235,0.0134701701310225,0.00380118343938842,3.03411014370249e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (reversal potentials from Nernst equation)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    // analytic solution of the buffered-Ca quadratic
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (exponential Rush-Larsen integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only relax while depolarized (per model spec)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
EntityInitializer.h | // Copyright (c) 2004-2022 Tomáš Oberhuber et al.
//
// This file is part of TNL - Template Numerical Library (https://tnl-project.org/)
//
// SPDX-License-Identifier: MIT
/***
* Authors:
* Oberhuber Tomas, tomas.oberhuber@fjfi.cvut.cz
* Zabka Vitezslav, zabkav@gmail.com
*/
#pragma once
#include <noa/3rdparty/tnl-noa/src/TNL/Meshes/MeshDetails/initializer/EntitySeed.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Meshes/MeshDetails/initializer/SubentitySeedsCreator.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Atomic.h>
#include <noa/3rdparty/tnl-noa/src/TNL/Algorithms/AtomicOperations.h>
namespace noa::TNL {
namespace Meshes {
template< typename MeshConfig >
class Initializer;
template< typename MeshConfig,
typename SubdimensionTag,
typename SuperdimensionTag,
typename SuperentityTopology =
typename MeshTraits< MeshConfig >::template EntityTraits< SuperdimensionTag::value >::EntityTopology,
// storage in the superentity
bool SubentityStorage = MeshConfig::subentityStorage( SuperdimensionTag::value, SubdimensionTag::value ),
// storage in the subentity
bool SuperentityStorage = MeshConfig::superentityStorage( SubdimensionTag::value, SuperdimensionTag::value ),
// necessary to disambiguate the stop condition for specializations
bool valid_dimension = ! std::is_same< SubdimensionTag, SuperdimensionTag >::value >
class EntityInitializerLayer;
/**
 * \brief Initializes entities of the given topology during mesh construction.
 *
 * This specialization is selected when subvertex storage is enabled for the
 * entity dimension (SubvertexStorage == true). It layers on top of
 * EntityInitializerLayer, which walks all superdimensions of the entity.
 */
template< typename MeshConfig,
          typename EntityTopology,
          bool SubvertexStorage = MeshConfig::subentityStorage( EntityTopology::dimension, 0 ) >
class EntityInitializer : public EntityInitializerLayer< MeshConfig,
                                                         DimensionTag< EntityTopology::dimension >,
                                                         DimensionTag< MeshTraits< MeshConfig >::meshDimension > >
{
   using BaseType = EntityInitializerLayer< MeshConfig,
                                            DimensionTag< EntityTopology::dimension >,
                                            DimensionTag< MeshTraits< MeshConfig >::meshDimension > >;

   using MeshTraitsType = MeshTraits< MeshConfig >;
   using EntityTraitsType = typename MeshTraitsType::template EntityTraits< EntityTopology::dimension >;

   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using LocalIndexType = typename MeshTraitsType::LocalIndexType;

   using SeedType = EntitySeed< MeshConfig, EntityTopology >;
   using InitializerType = Initializer< MeshConfig >;
   using NeighborCountsArray = typename MeshTraitsType::NeighborCountsArray;
   using SeedMatrixType = typename EntityTraitsType::SeedMatrixType;

public:
   //! \brief Allocates the entity-to-vertex incidence matrix with the given row capacities.
   static void
   initSubvertexMatrix( NeighborCountsArray& capacities, InitializerType& initializer )
   {
      initializer.template initSubentityMatrix< EntityTopology::dimension, 0 >( capacities );
   }

   //! \brief Moves an already-built seed matrix into the initializer and records the corner counts.
   static void
   initSubvertexMatrix( SeedMatrixType& seeds, InitializerType& initializer )
   {
      auto& subvertexMatrix = initializer.template getSubentitiesMatrix< EntityTopology::dimension, 0 >();
      subvertexMatrix = std::move( seeds.getMatrix() );
      initializer.template initSubentitiesCounts< EntityTopology::dimension, 0 >( seeds.getEntityCornerCounts() );
   }

   //! \brief Records the corner (vertex) indices of one entity taken from its seed.
   static void
   initEntity( const GlobalIndexType entityIndex, const SeedType& entitySeed, InitializerType& initializer )
   {
      // this is necessary if we want to use existing entities instead of intermediate seeds to create subentity seeds
      for( LocalIndexType i = 0; i < entitySeed.getCornerIds().getSize(); i++ )
         initializer.template setSubentityIndex< EntityTopology::dimension, 0 >(
            entityIndex, i, entitySeed.getCornerIds()[ i ] );
   }
};
// Specialization for entities whose subvertex (entity->vertex) incidence is
// NOT stored: every initialization hook is an intentional no-op, but the
// layer chain for superentity links is still inherited.
template< typename MeshConfig, typename EntityTopology >
class EntityInitializer< MeshConfig, EntityTopology, false >
: public EntityInitializerLayer< MeshConfig,
                                 DimensionTag< EntityTopology::dimension >,
                                 DimensionTag< MeshTraits< MeshConfig >::meshDimension > >
{
   using MeshTraitsType = MeshTraits< MeshConfig >;
   using EntityTraitsType = typename MeshTraitsType::template EntityTraits< EntityTopology::dimension >;
   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using SeedType = EntitySeed< MeshConfig, EntityTopology >;
   using InitializerType = Initializer< MeshConfig >;
   using NeighborCountsArray = typename MeshTraitsType::NeighborCountsArray;
   using SeedMatrixType = typename EntityTraitsType::SeedMatrixType;
public:
   // No subvertex storage configured -- nothing to allocate.
   static void
   initSubvertexMatrix( const NeighborCountsArray& capacities, InitializerType& initializer )
   {}
   // No subvertex storage configured -- the seed matrix is discarded.
   static void
   initSubvertexMatrix( SeedMatrixType& seeds, InitializerType& initializer )
   {}
   // No subvertex storage configured -- corner ids are not recorded.
   static void
   initEntity( const GlobalIndexType entityIndex, const SeedType& entitySeed, InitializerType& initializer )
   {}
};
/****
* Mesh entity initializer layer with specializations
*
* SUBENTITY STORAGE SUPERENTITY STORAGE
* TRUE TRUE
*/
// Both super->sub and sub->super incidences are stored. The algorithm runs in
// three passes: (1) allocate + fill the super->sub matrix, (2) count
// superentities per subentity, (3) allocate the sub->super matrix and fill it
// sequentially (the counts array is reused as per-row write cursors).
template< typename MeshConfig, typename SubdimensionTag, typename SuperdimensionTag, typename SuperentityTopology >
class EntityInitializerLayer< MeshConfig, SubdimensionTag, SuperdimensionTag, SuperentityTopology, true, true, true >
: public EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >
{
   using BaseType = EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >;
   using InitializerType = Initializer< MeshConfig >;
   using MeshType = typename InitializerType::MeshType;
   using MeshTraitsType = MeshTraits< MeshConfig >;
   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using LocalIndexType = typename MeshTraitsType::LocalIndexType;
   using SubentityTraitsType = typename MeshTraitsType::template EntityTraits< SubdimensionTag::value >;
   using SubentityTopology = typename SubentityTraitsType::EntityTopology;
   using SuperentityTraitsType = typename MeshTraitsType::template EntityTraits< SuperdimensionTag::value >;
   using SubentitySeedsCreatorType = SubentitySeedsCreator< MeshConfig, SuperentityTopology, SubdimensionTag >;
   using SuperentityMatrixType = typename MeshTraitsType::SuperentityMatrixType;
   using NeighborCountsArray = typename MeshTraitsType::NeighborCountsArray;
   using SeedType = EntitySeed< MeshConfig, SubentityTopology >;
public:
   static void
   initSuperentities( InitializerType& meshInitializer, MeshType& mesh )
   {
      // std::cout << "      Initiating superentities with dimension " << SuperdimensionTag::value << " for subentities with
      // dimension " << SubdimensionTag::value << " ... " << std::endl;
      const GlobalIndexType subentitiesCount = mesh.template getEntitiesCount< SubdimensionTag::value >();
      const GlobalIndexType superentitiesCount = mesh.template getEntitiesCount< SuperdimensionTag::value >();
      // Pass 1: allocate the super->sub incidence matrix. Skipped for vertex
      // subentities of non-polyhedral meshes, whose subvertex matrix was
      // already filled from the entity seeds.
      if( SubdimensionTag::value > 0 || std::is_same< SuperentityTopology, Topologies::Polyhedron >::value ) {
         NeighborCountsArray capacities( superentitiesCount );
         Algorithms::ParallelFor< Devices::Host >::exec( GlobalIndexType{ 0 },
                                                         superentitiesCount,
                                                         [ & ]( GlobalIndexType superentityIndex )
                                                         {
                                                            capacities[ superentityIndex ] =
                                                               SubentitySeedsCreatorType::getSubentitiesCount(
                                                                  meshInitializer, mesh, superentityIndex );
                                                         } );
         meshInitializer.template initSubentityMatrix< SuperdimensionTag::value, SubdimensionTag::value >( capacities,
                                                                                                           subentitiesCount );
      }
      // counter for superentities of each subentity
      auto& superentitiesCounts =
         meshInitializer.template getSuperentitiesCountsArray< SubdimensionTag::value, SuperdimensionTag::value >();
      superentitiesCounts.setSize( subentitiesCount );
      superentitiesCounts.setValue( 0 );
      // Pass 2: fill super->sub links and count sub->super links. The counts
      // are bumped atomically because the outer loop runs in parallel.
      Algorithms::ParallelFor< Devices::Host >::exec(
         GlobalIndexType{ 0 },
         superentitiesCount,
         [ & ]( GlobalIndexType superentityIndex )
         {
            LocalIndexType i = 0;
            SubentitySeedsCreatorType::iterate(
               meshInitializer,
               mesh,
               superentityIndex,
               [ & ]( SeedType& seed )
               {
                  const GlobalIndexType subentityIndex = meshInitializer.findEntitySeedIndex( seed );
                  // Subentity indices for SubdimensionTag::value == 0 of non-polyhedral meshes were already set up from seeds
                  if( SubdimensionTag::value > 0 || std::is_same< SuperentityTopology, Topologies::Polyhedron >::value )
                     meshInitializer.template setSubentityIndex< SuperdimensionTag::value, SubdimensionTag::value >(
                        superentityIndex, i++, subentityIndex );
                  Algorithms::AtomicOperations< Devices::Host >::add( superentitiesCounts[ subentityIndex ],
                                                                     LocalIndexType{ 1 } );
               } );
         } );
      // allocate superentities storage
      SuperentityMatrixType& matrix =
         meshInitializer.template getSuperentitiesMatrix< SubdimensionTag::value, SuperdimensionTag::value >();
      matrix.setDimensions( subentitiesCount, superentitiesCount );
      matrix.setRowCapacities( superentitiesCounts );
      superentitiesCounts.setValue( 0 );
      // Pass 3 (sequential): fill the sub->super matrix; superentitiesCounts
      // now serves as the next free slot in each subentity's row.
      for( GlobalIndexType superentityIndex = 0; superentityIndex < superentitiesCount; superentityIndex++ ) {
         for( LocalIndexType i = 0;
              i < mesh.template getSubentitiesCount< SuperdimensionTag::value, SubdimensionTag::value >( superentityIndex );
              i++ )
         {
            const GlobalIndexType subentityIndex =
               mesh.template getSubentityIndex< SuperdimensionTag::value, SubdimensionTag::value >( superentityIndex, i );
            auto row = matrix.getRow( subentityIndex );
            row.setElement( superentitiesCounts[ subentityIndex ]++, superentityIndex, true );
         }
      }
      // Continue with the next lower superentity dimension.
      BaseType::initSuperentities( meshInitializer, mesh );
   }
};
/****
* Mesh entity initializer layer with specializations
*
* SUBENTITY STORAGE SUPERENTITY STORAGE Subdimension Superdimension SUPERENTITY TOPOLOGY
* TRUE TRUE 2 3 POLYHEDRON
*/
// Faces (dimension 2) of polyhedral cells (dimension 3), both incidence
// directions stored. The cell->face incidences come directly from the cell
// seeds (their corner ids are face indices), so the matrix is moved instead
// of rebuilt.
template< typename MeshConfig >
class EntityInitializerLayer< MeshConfig, DimensionTag< 2 >, DimensionTag< 3 >, Topologies::Polyhedron, true, true, true >
: public EntityInitializerLayer< MeshConfig, DimensionTag< 2 >, typename DimensionTag< 3 >::Decrement >
{
   using SubdimensionTag = DimensionTag< 2 >;
   using SuperdimensionTag = DimensionTag< 3 >;
   using BaseType = EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >;
   using InitializerType = Initializer< MeshConfig >;
   using MeshType = typename InitializerType::MeshType;
   using MeshTraitsType = MeshTraits< MeshConfig >;
   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using LocalIndexType = typename MeshTraitsType::LocalIndexType;
   using SubentityTraitsType = typename MeshTraitsType::template EntityTraits< SubdimensionTag::value >;
   using SubentityTopology = typename SubentityTraitsType::EntityTopology;
   using SuperentityTraitsType = typename MeshTraitsType::template EntityTraits< SuperdimensionTag::value >;
   using SuperentityTopology = typename SuperentityTraitsType::EntityTopology;
   using SuperentityMatrixType = typename MeshTraitsType::SuperentityMatrixType;
   using NeighborCountsArray = typename MeshTraitsType::NeighborCountsArray;
   using SeedType = EntitySeed< MeshConfig, SubentityTopology >;
public:
   static void
   initSuperentities( InitializerType& meshInitializer, MeshType& mesh )
   {
      // std::cout << "      Initiating superentities with dimension " << SuperdimensionTag::value << " for subentities with
      // dimension " << SubdimensionTag::value << " ... " << std::endl;
      const GlobalIndexType subentitiesCount = mesh.template getEntitiesCount< SubdimensionTag::value >();
      const GlobalIndexType superentitiesCount = mesh.template getEntitiesCount< SuperdimensionTag::value >();
      // counter for superentities of each subentity
      auto& superentitiesCounts =
         meshInitializer.template getSuperentitiesCountsArray< SubdimensionTag::value, SuperdimensionTag::value >();
      superentitiesCounts.setSize( subentitiesCount );
      superentitiesCounts.setValue( 0 );
      // Count how many cells reference each face; cell seed corner ids are
      // face indices here.
      auto& cellSeeds = meshInitializer.getCellSeeds();
      for( GlobalIndexType superentityIndex = 0; superentityIndex < superentitiesCount; superentityIndex++ ) {
         const auto cellSeed = cellSeeds.getSeed( superentityIndex );
         for( LocalIndexType i = 0; i < cellSeed.getCornersCount(); i++ ) {
            const GlobalIndexType subentityIndex = cellSeed.getCornerId( i );
            superentitiesCounts[ subentityIndex ]++;
         }
      }
      // Move the cell->face incidence matrix out of the seeds instead of
      // rebuilding it. NOTE: cellSeeds must not be used for this purpose again.
      auto& subvertexMatrix =
         meshInitializer.template getSubentitiesMatrix< SuperdimensionTag::value, SubdimensionTag::value >();
      subvertexMatrix = std::move( cellSeeds.getMatrix() );
      meshInitializer.template initSubentitiesCounts< SuperdimensionTag::value, SubdimensionTag::value >(
         cellSeeds.getEntityCornerCounts() );
      // allocate superentities storage
      SuperentityMatrixType& matrix =
         meshInitializer.template getSuperentitiesMatrix< SubdimensionTag::value, SuperdimensionTag::value >();
      matrix.setDimensions( subentitiesCount, superentitiesCount );
      matrix.setRowCapacities( superentitiesCounts );
      superentitiesCounts.setValue( 0 );
      // initialize superentities storage
      for( GlobalIndexType superentityIndex = 0; superentityIndex < superentitiesCount; superentityIndex++ ) {
         for( LocalIndexType i = 0;
              i < mesh.template getSubentitiesCount< SuperdimensionTag::value, SubdimensionTag::value >( superentityIndex );
              i++ )
         {
            const GlobalIndexType subentityIndex =
               mesh.template getSubentityIndex< SuperdimensionTag::value, SubdimensionTag::value >( superentityIndex, i );
            auto row = matrix.getRow( subentityIndex );
            row.setElement( superentitiesCounts[ subentityIndex ]++, superentityIndex, true );
         }
      }
      BaseType::initSuperentities( meshInitializer, mesh );
   }
};
/****
* Mesh entity initializer layer with specializations
*
* SUBENTITY STORAGE SUPERENTITY STORAGE
* TRUE FALSE
*/
// Only the super->sub incidence is stored (no sub->super links). Allocates
// and fills the super->sub matrix, then recurses to the next dimension.
template< typename MeshConfig, typename SubdimensionTag, typename SuperdimensionTag, typename SuperentityTopology >
class EntityInitializerLayer< MeshConfig, SubdimensionTag, SuperdimensionTag, SuperentityTopology, true, false, true >
: public EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >
{
   using BaseType = EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >;
   using InitializerType = Initializer< MeshConfig >;
   using MeshType = typename InitializerType::MeshType;
   using MeshTraitsType = MeshTraits< MeshConfig >;
   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using LocalIndexType = typename MeshTraitsType::LocalIndexType;
   using SubentityTraitsType = typename MeshTraitsType::template EntityTraits< SubdimensionTag::value >;
   using SubentityTopology = typename SubentityTraitsType::EntityTopology;
   using SuperentityTraitsType = typename MeshTraitsType::template EntityTraits< SuperdimensionTag::value >;
   using SubentitySeedsCreatorType = SubentitySeedsCreator< MeshConfig, SuperentityTopology, SubdimensionTag >;
   using NeighborCountsArray = typename MeshTraitsType::NeighborCountsArray;
   using SeedType = EntitySeed< MeshConfig, SubentityTopology >;
public:
   static void
   initSuperentities( InitializerType& meshInitializer, MeshType& mesh )
   {
      // std::cout << "      Initiating superentities with dimension " << SuperdimensionTag::value << " for subentities with
      // dimension " << SubdimensionTag::value << " ... " << std::endl;
      const GlobalIndexType subentitiesCount = mesh.template getEntitiesCount< SubdimensionTag::value >();
      const GlobalIndexType superentitiesCount = mesh.template getEntitiesCount< SuperdimensionTag::value >();
      // Vertex subentities of non-polyhedral meshes were already initialized
      // from the entity seeds, so nothing to do for them here.
      if( SubdimensionTag::value > 0 || std::is_same< SuperentityTopology, Topologies::Polyhedron >::value ) {
         NeighborCountsArray capacities( superentitiesCount );
         Algorithms::ParallelFor< Devices::Host >::exec( GlobalIndexType{ 0 },
                                                         superentitiesCount,
                                                         [ & ]( GlobalIndexType superentityIndex )
                                                         {
                                                            capacities[ superentityIndex ] =
                                                               SubentitySeedsCreatorType::getSubentitiesCount(
                                                                  meshInitializer, mesh, superentityIndex );
                                                         } );
         meshInitializer.template initSubentityMatrix< SuperdimensionTag::value, SubdimensionTag::value >( capacities,
                                                                                                           subentitiesCount );
         // Each superentity writes only into its own row, so this loop is
         // safe to run in parallel without atomics.
         Algorithms::ParallelFor< Devices::Host >::exec(
            GlobalIndexType{ 0 },
            superentitiesCount,
            [ & ]( GlobalIndexType superentityIndex )
            {
               LocalIndexType i = 0;
               SubentitySeedsCreatorType::iterate(
                  meshInitializer,
                  mesh,
                  superentityIndex,
                  [ & ]( SeedType& seed )
                  {
                     const GlobalIndexType subentityIndex = meshInitializer.findEntitySeedIndex( seed );
                     meshInitializer.template setSubentityIndex< SuperdimensionTag::value, SubdimensionTag::value >(
                        superentityIndex, i++, subentityIndex );
                  } );
            } );
      }
      BaseType::initSuperentities( meshInitializer, mesh );
   }
};
/****
* Mesh entity initializer layer with specializations
*
* SUBENTITY STORAGE SUPERENTITY STORAGE Subdimension Superdimension SUPERENTITY TOPOLOGY
* TRUE FALSE 2 3 POLYHEDRON
*/
// Faces of polyhedral cells, super->sub stored only. The cell->face matrix is
// taken over from the cell seeds by move; no counting passes are needed.
template< typename MeshConfig >
class EntityInitializerLayer< MeshConfig, DimensionTag< 2 >, DimensionTag< 3 >, Topologies::Polyhedron, true, false, true >
: public EntityInitializerLayer< MeshConfig, DimensionTag< 2 >, typename DimensionTag< 3 >::Decrement >
{
   using SubdimensionTag = DimensionTag< 2 >;
   using SuperdimensionTag = DimensionTag< 3 >;
   using BaseType = EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >;
   using InitializerType = Initializer< MeshConfig >;
   using MeshType = typename InitializerType::MeshType;
   using MeshTraitsType = MeshTraits< MeshConfig >;
   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using LocalIndexType = typename MeshTraitsType::LocalIndexType;
   using SubentityTraitsType = typename MeshTraitsType::template EntityTraits< SubdimensionTag::value >;
   using SubentityTopology = typename SubentityTraitsType::EntityTopology;
   using SuperentityTraitsType = typename MeshTraitsType::template EntityTraits< SuperdimensionTag::value >;
   using SuperentityTopology = typename SuperentityTraitsType::EntityTopology;
   using NeighborCountsArray = typename MeshTraitsType::NeighborCountsArray;
   using SeedType = EntitySeed< MeshConfig, SubentityTopology >;
public:
   static void
   initSuperentities( InitializerType& meshInitializer, MeshType& mesh )
   {
      // std::cout << "      Initiating superentities with dimension " << SuperdimensionTag::value << " for subentities with
      // dimension " << SubdimensionTag::value << " ... " << std::endl;
      // Move the cell seed matrix into mesh storage; corner ids of cell seeds
      // are face indices for polyhedral meshes.
      auto& cellSeeds = meshInitializer.getCellSeeds();
      auto& subvertexMatrix =
         meshInitializer.template getSubentitiesMatrix< SuperdimensionTag::value, SubdimensionTag::value >();
      subvertexMatrix = std::move( cellSeeds.getMatrix() );
      meshInitializer.template initSubentitiesCounts< SuperdimensionTag::value, SubdimensionTag::value >(
         cellSeeds.getEntityCornerCounts() );
      BaseType::initSuperentities( meshInitializer, mesh );
   }
};
/****
* Mesh entity initializer layer with specializations
*
* SUBENTITY STORAGE SUPERENTITY STORAGE
* FALSE TRUE
*/
// Only the sub->super incidence is stored. Superentities per subentity are
// counted in parallel (atomic adds), then the matrix is filled sequentially;
// see the retained commented-out code below for a parallel fill attempt that
// exhibited a race condition.
template< typename MeshConfig, typename SubdimensionTag, typename SuperdimensionTag, typename SuperentityTopology >
class EntityInitializerLayer< MeshConfig, SubdimensionTag, SuperdimensionTag, SuperentityTopology, false, true, true >
: public EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >
{
   using BaseType = EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >;
   using InitializerType = Initializer< MeshConfig >;
   using MeshType = typename InitializerType::MeshType;
   using MeshTraitsType = MeshTraits< MeshConfig >;
   using GlobalIndexType = typename MeshTraitsType::GlobalIndexType;
   using LocalIndexType = typename MeshTraitsType::LocalIndexType;
   using SubentityTraitsType = typename MeshTraitsType::template EntityTraits< SubdimensionTag::value >;
   using SubentityTopology = typename SubentityTraitsType::EntityTopology;
   using SuperentityTraitsType = typename MeshTraitsType::template EntityTraits< SuperdimensionTag::value >;
   using SubentitySeedsCreatorType = SubentitySeedsCreator< MeshConfig, SuperentityTopology, SubdimensionTag >;
   using SuperentityMatrixType = typename MeshTraitsType::SuperentityMatrixType;
   using SeedType = EntitySeed< MeshConfig, SubentityTopology >;
public:
   static void
   initSuperentities( InitializerType& meshInitializer, MeshType& mesh )
   {
      // std::cout << "      Initiating superentities with dimension " << SuperdimensionTag::value << " for subentities with
      // dimension " << SubdimensionTag::value << " ... " << std::endl;
      const GlobalIndexType subentitiesCount = mesh.template getEntitiesCount< SubdimensionTag::value >();
      const GlobalIndexType superentitiesCount = mesh.template getEntitiesCount< SuperdimensionTag::value >();
      // counter for superentities of each subentity
      auto& superentitiesCounts =
         meshInitializer.template getSuperentitiesCountsArray< SubdimensionTag::value, SuperdimensionTag::value >();
      superentitiesCounts.setSize( subentitiesCount );
      superentitiesCounts.setValue( 0 );
      // Parallel counting pass: atomic adds because multiple superentities
      // may share a subentity.
      Algorithms::ParallelFor< Devices::Host >::exec( GlobalIndexType{ 0 },
                                                      superentitiesCount,
                                                      [ & ]( GlobalIndexType superentityIndex )
                                                      {
                                                         SubentitySeedsCreatorType::iterate(
                                                            meshInitializer,
                                                            mesh,
                                                            superentityIndex,
                                                            [ & ]( SeedType& seed )
                                                            {
                                                               const GlobalIndexType subentityIndex =
                                                                  meshInitializer.findEntitySeedIndex( seed );
                                                               Algorithms::AtomicOperations< Devices::Host >::add(
                                                                  superentitiesCounts[ subentityIndex ], LocalIndexType{ 1 } );
                                                            } );
                                                      } );
      // allocate superentities storage
      SuperentityMatrixType& matrix =
         meshInitializer.template getSuperentitiesMatrix< SubdimensionTag::value, SuperdimensionTag::value >();
      matrix.setDimensions( subentitiesCount, superentitiesCount );
      matrix.setRowCapacities( superentitiesCounts );
      superentitiesCounts.setValue( 0 );
      // initialize superentities storage
      for( GlobalIndexType superentityIndex = 0; superentityIndex < superentitiesCount; superentityIndex++ ) {
         SubentitySeedsCreatorType::iterate(
            meshInitializer,
            mesh,
            superentityIndex,
            [ & ]( SeedType& seed )
            {
               const GlobalIndexType subentityIndex = meshInitializer.findEntitySeedIndex( seed );
               auto row = matrix.getRow( subentityIndex );
               row.setElement( superentitiesCounts[ subentityIndex ]++, superentityIndex, true );
            } );
      }
      // Here is an attempt of parallelization of previous for cycle, that seemingly causes some kind of race condition
      /*Algorithms::ParallelFor< Devices::Host >::exec( GlobalIndexType{ 0 }, superentitiesCount, [&] ( GlobalIndexType
      superentityIndex )
      {
         SubentitySeedsCreatorType::iterate( meshInitializer, mesh, superentityIndex, [&] ( SeedType& seed ) {
            const GlobalIndexType subentityIndex = meshInitializer.findEntitySeedIndex( seed );
            auto row = matrix.getRow( subentityIndex );
            LocalIndexType superentityCount;
            #pragma omp atomic capture
            {
               superentityCount = superentitiesCounts[ subentityIndex ];
               superentitiesCounts[ subentityIndex ]++;
            }
            row.setElement( superentityCount, superentityIndex, true );
         });
      });*/
      BaseType::initSuperentities( meshInitializer, mesh );
   }
};
// Neither incidence direction is stored: nothing to initialize for this
// dimension pair, just continue down the layer chain via the base class.
template< typename MeshConfig, typename SubdimensionTag, typename SuperdimensionTag, typename SuperentityTopology >
class EntityInitializerLayer< MeshConfig, SubdimensionTag, SuperdimensionTag, SuperentityTopology, false, false, true >
: public EntityInitializerLayer< MeshConfig, SubdimensionTag, typename SuperdimensionTag::Decrement >
{};
// Recursion stop: Subdimension == Superdimension (valid_dimension == false).
// Terminates the layer chain with a no-op initSuperentities.
template< typename MeshConfig,
          typename SubdimensionTag,
          typename SuperentityTopology,
          bool SubentityStorage,
          bool SuperentityStorage >
class EntityInitializerLayer< MeshConfig,
                              SubdimensionTag,
                              SubdimensionTag,
                              SuperentityTopology,
                              SubentityStorage,
                              SuperentityStorage,
                              false >
{
   using InitializerType = Initializer< MeshConfig >;
   using MeshType = typename InitializerType::MeshType;
public:
   // End of the recursion -- intentionally empty.
   static void
   initSuperentities( InitializerType& meshInitializer, MeshType& mesh )
   {}
};
} // namespace Meshes
} // namespace noa::TNL
|
abstract_pivot_column.h | /* Copyright 2013 IST Austria
Contributed by: Ulrich Bauer, Michael Kerber, Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include <phat/helpers/misc.h>
#include <phat/representations/vector_vector.h>
namespace phat {
// Note: We could even make the rep generic in the underlying Const representation
// But I cannot imagine that anything else than vector<vector<index>> would
// make sense
// Decorator over vector_vector: each thread keeps one "pivot column" in a
// specialized representation (PivotColumn) to speed up repeated additions
// into the same column during matrix reduction. All _-prefixed operations
// transparently dispatch to the pivot column when `idx` is the current
// pivot, otherwise to the base representation.
template< typename PivotColumn >
class abstract_pivot_column : public vector_vector {
protected:
    typedef vector_vector Base;
    typedef PivotColumn pivot_col;
    // For parallization purposes, it could be more than one full column
    // (one pivot column and its index per thread; -1 means "no pivot held").
    mutable thread_local_storage< pivot_col > pivot_cols;
    mutable thread_local_storage< index > idx_of_pivot_cols;
    // Current thread's pivot column.
    pivot_col& get_pivot_col() const {
        return pivot_cols();
    }
    // True iff `idx` is the column currently cached by this thread.
    bool is_pivot_col( index idx ) const {
        return idx_of_pivot_cols() == idx;
    }
    // Write the cached pivot column back into the base representation and
    // mark this thread as holding no pivot.
    void release_pivot_col() {
        index idx = idx_of_pivot_cols();
        if( idx != -1 ) {
            this->matrix[ idx ].clear();
            pivot_cols().get_col_and_clear( this->matrix[ idx ] );
        }
        idx_of_pivot_cols() = -1;
    }
    // Load column `idx` into this thread's pivot column (releasing any
    // previously held pivot first).
    void make_pivot_col( index idx ) {
        release_pivot_col();
        idx_of_pivot_cols() = idx;
        get_pivot_col().add_col( matrix[ idx ] );
    }
public:
    // Initialize one pivot column per thread. NOTE(review): the loop bound
    // omp_get_num_threads() is evaluated inside the parallel region, so it
    // reflects the team size -- confirm this matches thread_local_storage.
    void _set_num_cols( index nr_of_cols ) {
        #pragma omp parallel for
        for( int tid = 0; tid < omp_get_num_threads(); tid++ ) {
            pivot_cols[ tid ].init( nr_of_cols );
            idx_of_pivot_cols[ tid ] = -1;
        }
        Base::_set_num_cols( nr_of_cols );
    }
    // target += source, performed in the fast pivot representation.
    void _add_to( index source, index target ) {
        if( !is_pivot_col( target ) )
            make_pivot_col( target );
        get_pivot_col().add_col( matrix[source] );
    }
    // Flush every thread's pivot column back to the base representation.
    void _sync() {
        #pragma omp parallel for
        for( int tid = 0; tid < omp_get_num_threads(); tid++ )
            release_pivot_col();
    }
    // The remaining operations forward to the pivot column when idx is the
    // cached pivot, otherwise to the plain vector_vector storage.
    void _get_col( index idx, column& col ) const { is_pivot_col( idx ) ? get_pivot_col().get_col( col ) : Base::_get_col( idx, col ); }
    bool _is_empty( index idx ) const { return is_pivot_col( idx ) ? get_pivot_col().is_empty() : Base::_is_empty( idx ); }
    index _get_max_index( index idx ) const { return is_pivot_col( idx ) ? get_pivot_col().get_max_index() : Base::_get_max_index( idx ); }
    void _clear( index idx ) { is_pivot_col( idx ) ? get_pivot_col().clear() : Base::_clear( idx ); }
    void _set_col( index idx, const column& col ) { is_pivot_col( idx ) ? get_pivot_col().set_col( col ) : Base::_set_col( idx, col ); }
    void _remove_max( index idx ) { is_pivot_col( idx ) ? get_pivot_col().remove_max() : Base::_remove_max( idx ); }
    void finalize( index idx ) { Base::_finalize( idx ); }
};
}
|
omp_atomic_exemple.c | // omp_atomic_exemple.c
// compile with: /openmp
/* #############################################################################
## DESCRIPTION: Simple exemple using atomic in OpenMP.
## NAME: omp_atomic_exemple.c
## AUTHOR: Lucca Pessoa da Silva Matos
## DATE: 10.04.2020
## VERSION: 1.0
## EXEMPLE:
## PS C:\> gcc -fopenmp -o omp_atomic_exemple omp_atomic_exemple.c
##############################################################################*/
// =============================================================================
// LIBRARYS
// =============================================================================
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <locale.h>
// =============================================================================
// MACROS
// =============================================================================
#define NUM_THREADS 12
// =============================================================================
// CALL FUNCTIONS
// =============================================================================
void cabecalho();
void set_portuguese();
// =============================================================================
// MAIN
// =============================================================================
// Entry point: NUM_THREADS threads each atomically bump a shared counter,
// then the observed number of increments is reported.
int main(int argc, char const *argv[]){
  int thread_count = 0;
  set_portuguese();
  cabecalho();
  printf("\n1 - Estamos fora do contexto paralelo. Entrando...\n");
  #pragma omp parallel num_threads(NUM_THREADS)
  {
    // Atomic update: every thread contributes exactly one increment.
    #pragma omp atomic
    thread_count++;
  }
  printf("\n2 - Estamos fora do contexto paralelo. Saindo...\n");
  printf("\nNumber of threads: %d\n\n", thread_count);
  return 0;
}
// =============================================================================
// FUNCTIONS
// =============================================================================
// Switch the program locale to Portuguese; the return value of setlocale
// (the resulting locale string, or NULL on failure) is deliberately ignored.
void set_portuguese(){
  (void) setlocale(LC_ALL, "Portuguese");
}
// Print the program banner to stdout (byte-identical to the original output;
// the strings contain no conversion specifiers, so fputs is equivalent).
void cabecalho(){
  fputs("\n**************************************************", stdout);
  fputs("\n* *", stdout);
  fputs("\n* *", stdout);
  fputs("\n* PROGRAMACAO PARALELA COM OPENMP - LUCCA PESSOA *", stdout);
  fputs("\n* *", stdout);
  fputs("\n* *", stdout);
  fputs("\n**************************************************\n", stdout);
}
|
test.c |
#include <stdlib.h>
#include <stdio.h>
/* Checks that two asynchronous (nowait) target tasks are ordered by the
 * depend clauses: the first produces `dep` (depend(out)), the second
 * consumes it (depend(in)) and must therefore run after the first.
 * After taskwait, dep must have been incremented exactly twice.
 * Returns EXIT_SUCCESS regardless; the verdict is printed to stdout. */
int main(int argc, char *argv[])
{
    int dep = 0;
    /* Producer task for `dep`. */
    #pragma omp target device(0) nowait map(tofrom: dep) depend(out: dep)
    {
        dep++;
    }
    /* Consumer task: depend(in) serializes it after the producer above. */
    #pragma omp target device(1) nowait map(tofrom: dep) depend(in: dep)
    {
        dep++;
    }
    /* Wait for both deferred target tasks to finish. */
    #pragma omp taskwait
    if (dep == 2) {
        printf("completed with 0 errors\n");
    } else {
        /* Fixed grammar of the failure message ("a error" -> "an error"). */
        printf("completed with an error:\n");
        printf("dep should be 2, but is %d\n", dep);
    }
    return EXIT_SUCCESS;
}
|
omp_single_private.c | // RUN: %libomp-compile-and-run
// REQUIRES: !(abt && (clang || gcc))
#include <stdio.h>
#include "omp_testsuite.h"
int myit = 0;
#pragma omp threadprivate(myit)
int myresult = 0;
#pragma omp threadprivate(myresult)
/* Checks `single` combined with `private`.
 *
 * Inside the single region nr_threads_in_single is a PRIVATE copy, so the
 * increments there never touch the shared variable, which must still be 0
 * when summed under `critical`. myit/myresult are threadprivate, so each
 * thread accumulates its own counts, reduced into the shared totals in the
 * critical section. Returns 1 on success (shared counter untouched and the
 * single regions executed exactly LOOPCOUNT times in total), 0 otherwise. */
int test_omp_single_private()
{
  int nr_threads_in_single;
  int result;
  int nr_iterations;
  int i;
  myit = 0;
  nr_threads_in_single = 0;
  nr_iterations = 0;
  result = 0;
  #pragma omp parallel private(i)
  {
    myresult = 0;
    myit = 0;
    for (i = 0; i < LOOPCOUNT; i++) {
      #pragma omp single private(nr_threads_in_single) nowait
      {
        nr_threads_in_single = 0; /* private copy: the shared one stays 0 */
        #pragma omp flush
        nr_threads_in_single++;
        #pragma omp flush
        myit++; /* threadprivate count of single regions executed */
        myresult = myresult + nr_threads_in_single;
      }
    }
    #pragma omp critical
    {
      /* reads the SHARED nr_threads_in_single, which must still be 0 */
      result += nr_threads_in_single;
      nr_iterations += myit;
    }
  }
  return ((result == 0) && (nr_iterations == LOOPCOUNT));
} /* end of check_single private */
// Driver: repeat the single/private check REPETITIONS times and return the
// number of failed repetitions (0 == all passed).
int main()
{
  int rep;
  int failures = 0;
  for (rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_single_private()) {
      failures++;
    }
  }
  return failures;
}
|
atomic-16.c | // { dg-do run }
extern void abort (void);
int x = 6, cnt;
/* Returns the current value of the global call counter `cnt` and then bumps
   it; the test below uses the counter to verify how many times the atomic
   operand's index subexpressions are evaluated.  */
int
foo (void)
{
  int previous = cnt;
  cnt = previous + 1;
  return previous;
}
int
main ()
{
  int v, *p;
  p = &x;
  /* Every foo () call increments cnt; the checks on cnt below verify how
     many times each atomic form evaluates its index subexpressions.  */
#pragma omp atomic update
  p[foo (), 0] = 16 + 6 - p[foo (), 0];
#pragma omp atomic read
  v = x;
  /* x was 6: 16 + 6 - 6 = 16; the lvalue expression is evaluated twice.  */
  if (cnt != 2 || v != 16)
    abort ();
#pragma omp atomic capture
  v = p[foo () + foo (), 0] = p[foo () + foo (), 0] + 3;
  /* 16 + 3 = 19; four foo () calls in total.  */
  if (cnt != 6 || v != 19)
    abort ();
#pragma omp atomic capture
  v = p[foo (), 0] = 12 * 1 / 2 + (foo (), 0) + p[foo (), 0];
  /* 6 + 0 + 19 = 25; three foo () calls.  */
  if (cnt != 9 || v != 25)
    abort ();
#pragma omp atomic capture
  {
    v = p[foo () & 0]; p[foo () & 0] = (foo (), 1) * 9 - p[foo () & 0];
  }
  /* v captures the old value 25; x becomes 9 - 25 = -16; four calls.  */
  if (cnt != 13 || v != 25)
    abort ();
#pragma omp atomic read
  v = x;
  if (v != -16)
    abort ();
#pragma omp atomic capture
  {
    p[0 & foo ()] = 16 - 2 + 3 + p[0 & foo ()]; v = p[0 & foo ()];
  }
  /* x becomes 17 + (-16) = 1 and v captures the new value; three calls.  */
  if (cnt != 16 || v != 1)
    abort ();
#pragma omp atomic capture
  {
    v = p[foo (), 0]; p[foo (), 0] = (foo (), 7) ? 13 : foo () + 6;
  }
  /* v captures 1, then x is overwritten with 13; the false branch's foo ()
     is never evaluated, so only three calls.  */
  if (cnt != 19 || v != 1)
    abort ();
#pragma omp atomic read
  v = x;
  if (v != 13)
    abort ();
  return 0;
}
|
core_dttlqt.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zttlqt.c, normal z -> d, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <omp.h>
// This will be swapped during the automatic code generation.
#undef REAL
#define REAL
/***************************************************************************//**
*
* @ingroup core_ttlqt
*
* Computes an LQ factorization of a rectangular matrix
* formed by coupling side-by-side an m-by-m lower triangular tile A1
* and an m-by-n lower triangular tile A2:
*
* | A1 A2 | = L * Q
*
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the tile A1 and A2. m >= 0.
* The number of columns of the tile A1.
*
* @param[in] n
* The number of columns of the tile A2. n >= 0.
*
* @param[in] ib
* The inner-blocking size. ib >= 0.
*
* @param[in,out] A1
* On entry, the m-by-m tile A1.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-m lower trapezoidal tile L;
* the elements above the diagonal are not referenced.
*
* @param[in] lda1
* The leading dimension of the array A1. lda1 >= max(1,m).
*
* @param[in,out] A2
* On entry, the m-by-n lower triangular tile A2.
* On exit, the elements on and below the diagonal of the array
* with the matrix T represent
* the orthogonal tile Q as a product of elementary reflectors.
*
* @param[in] lda2
* The leading dimension of the array A2. lda2 >= max(1,m).
*
* @param[out] T
* The ib-by-m triangular factor T of the block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] ldt
* The leading dimension of the array T. ldt >= ib.
*
* @param tau
* Auxiliary workspace array of length m.
*
* @param work
* Auxiliary workspace array of length ib*m.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
__attribute__((weak))
int plasma_core_dttlqt(int m, int n, int ib,
                       double *A1, int lda1,
                       double *A2, int lda2,
                       double *T,  int ldt,
                       double *tau,
                       double *work)
{
    // Check input arguments.
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        plasma_coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        plasma_coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -11;
    }
    // quick return
    if ((m == 0) || (n == 0) || (ib == 0))
        return PlasmaSuccess;
    // TODO: Need to check why some cases require this to avoid
    // uninitialized values
    //core_dlaset(PlasmaGeneral, ib, m, 0.0, 0.0, T, ldt);
    // Blocked LQ factorization of the coupled pair [A1 A2]: process rows in
    // panels of width ib; within a panel, build one reflector per row.
    for (int ii = 0; ii < m; ii += ib) {
        int sb = imin(m-ii, ib);
        for (int i = 0; i < sb; i++) {
            int j  = ii + i;
            int mi = sb-i-1;
            int ni = imin( j + 1, n);
            // Generate elementary reflector H(ii*ib+i) to annihilate
            // A(ii*ib+i, ii*ib+i:m).
#ifdef COMPLEX
            LAPACKE_dlacgv_work(ni, &A2[j], lda2);
            LAPACKE_dlacgv_work(1, &A1[lda1*j+j], lda1);
#endif
            LAPACKE_dlarfg_work(ni+1, &A1[lda1*j+j], &A2[j], lda2, &tau[j]);
            double alpha;
            if (mi > 0) {
                // Apply H(j-1) to A(j:ii+ib-1, j-1:m) from the right.
                cblas_dcopy(
                    mi,
                    &A1[lda1*j+(j+1)], 1,
                    work, 1);
                double zone = 1.0;
                // work := A2(j+1:, :) * A2(j, :)^T + work
                cblas_dgemv(
                    CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                    mi, ni,
                    (zone), &A2[j+1], lda2,
                            &A2[j],   lda2,
                    (zone), work,     1);
                alpha = -(tau[j]);
                cblas_daxpy(
                    mi, (alpha),
                    work, 1,
                    &A1[lda1*j+j+1], 1);
                // Rank-1 update of the remaining panel rows of A2.
                cblas_dger(
                    CblasColMajor, mi, ni,
                    (alpha), work, 1,
                    &A2[j], lda2,
                    &A2[j+1], lda2);
            }
            // Calculate T.
            if (i > 0 ) {
                int l = imin(i, imax(0, n-ii));
                alpha = -(tau[j]);
                // T(0:i-1, j) := -tau[j] * A2(ii:j-1, :) * A2(j, :)^T
                plasma_core_dpemv(
                        PlasmaNoTrans, PlasmaRowwise,
                        i, imin(j, n), l,
                        alpha, &A2[ii], lda2,
                               &A2[j],  lda2,
                        0.0,   &T[ldt*j], 1,
                        work);
                // T(0:i-1, j) = T(0:i-1, ii:j-1) * T(0:i-1, j)
                cblas_dtrmv(
                        CblasColMajor, (CBLAS_UPLO)PlasmaUpper,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        (CBLAS_DIAG)PlasmaNonUnit,
                        i, &T[ldt*ii], ldt,
                           &T[ldt*j], 1);
            }
#ifdef COMPLEX
            LAPACKE_dlacgv_work(ni, &A2[j], lda2 );
            LAPACKE_dlacgv_work(1, &A1[lda1*j+j], lda1 );
#endif
            // Diagonal entry of the triangular factor holds the scalar tau.
            T[ldt*j+i] = tau[j];
        }
        // Apply Q to the rest of the matrix to the right.
        if (m > ii+sb) {
            int mi = m-(ii+sb);
            int ni = imin(ii+sb, n);
            int l  = imin(sb, imax(0, ni-ii));
            plasma_core_dparfb(
                PlasmaRight, PlasmaNoTrans,
                PlasmaForward, PlasmaRowwise,
                mi, ib, mi, ni, sb, l,
                        &A1[lda1*ii+ii+sb], lda1,
                        &A2[ii+sb], lda2,
                        &A2[ii], lda2,
                        &T[ldt*ii], ldt,
                        work, m);
        }
    }
    return PlasmaSuccess;
}
/******************************************************************************/
/**
 * OpenMP task wrapper around plasma_core_dttlqt().
 *
 * Submits the TTLQT kernel as an OpenMP task with data dependencies on A1
 * (inout), A2 (inout) and T (out). The per-thread tau/work buffers are taken
 * from the shared workspace, indexed by the executing thread's id.
 *
 * Fix: the failure message previously named the wrong kernel
 * ("core_dtslqt" -- the TS variant) instead of core_dttlqt.
 */
void plasma_core_omp_dttlqt(int m, int n, int ib,
                            double *A1, int lda1,
                            double *A2, int lda2,
                            double *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*m]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*m]) // T should be mxib, but is stored
                                           // as ibxm
    {
        // Skip the kernel if an earlier task in the sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces: each thread owns one slot; the first m
            // doubles hold tau, the rest is scratch for the kernel.
            int tid = omp_get_thread_num();
            double *tau = ((double*)work.spaces[tid]);
            // Call the kernel.
            int info = plasma_core_dttlqt(m, n, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          T, ldt,
                                          tau,
                                          tau+m);
            if (info != PlasmaSuccess) {
                plasma_error("core_dttlqt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
scratch.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Print a greeting that identifies the calling OpenMP thread. */
static void fun() {
    printf("Hi from thread %d\n", omp_get_thread_num());
}
/* Launch an OpenMP parallel region; every thread in the team announces
 * itself via fun(), then the implicit barrier at the end of the region
 * joins the team back into the master thread. */
int main (int argc, char *argv[]) {
    #pragma omp parallel
    fun();
    return 0;
}
|
GeneralMatrixMatrix.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H
namespace Eigen {
namespace internal {
template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;
/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  // A row-major C = A*B is evaluated as a col-major C^T = B^T * A^T:
  // swap lhs/rhs (and their conjugation flags), flip each operand's storage
  // order, and forward everything to the ColMajor kernel below.
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};
/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  // Computes res += alpha * lhs * rhs with cache blocking; when `info` is
  // non-null the call is one worker of an OpenMP team (see the parallel
  // branch below), otherwise it runs the sequential algorithm.
  static void run(Index rows, Index cols, Index depth,
                  const LhsScalar* _lhs, Index lhsStride,
                  const RhsScalar* _rhs, Index rhsStride,
                  ResScalar* res, Index resStride,
                  ResScalar alpha,
                  level3_blocking<LhsScalar,RhsScalar>& blocking,
                  GemmParallelInfo<Index>* info = 0)
  {
    const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride);
    const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride);
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    Index kc = blocking.kc();                  // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction
    //Index nc = blocking.nc(); // cache block size along the N direction
    gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;
#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      Index tid = omp_get_thread_num();
      Index threads = omp_get_num_threads();
      std::size_t sizeA = kc*mc;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      // Per-thread buffers: the packed lhs block A' and the kernel workspace.
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0);
      ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0);
      // The packed rhs B' lives in the blocking object and is shared by the team.
      RhsScalar* blockB = blocking.blockB();
      eigen_internal_assert(blockB!=0);
      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'
        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing A'.
        pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc);
        // Pack B_k to B' in a parallel fashion:
        // each thread packs the sub block B_k,j to B'_j where j is the thread id.
        // However, before copying to B'_j, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        // NOTE(review): users/sync are read with plain loads in these spin
        // loops (pre-C++11-atomics style) -- confirm GemmParallelInfo declares
        // them volatile/atomic in this Eigen version.
        while(info[tid].users!=0) {}
        info[tid].users += threads;
        pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length);
        // Notify the other threads that the part B'_j is ready to go.
        info[tid].sync = k;
        // Computes C_i += A' * B' per B'_j
        for(Index shift=0; shift<threads; ++shift)
        {
          Index j = (tid+shift)%threads;
          // At this point we have to make sure that B'_j has been updated by the thread j,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if(shift>0)
            while(info[j].sync!=k) {}
          gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w);
        }
        // Then keep going as usual with the remaining A'
        for(Index i=mc; i<rows; i+=mc)
        {
          const Index actual_mc = (std::min)(i+mc,rows)-i;
          // pack A_i,k to A'
          pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc);
          // C_i += A' * B'
          gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w);
        }
        // Release all the sub blocks B'_j of B' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index j=0; j<threads; ++j)
        {
          #pragma omp atomic
          info[j].users -= 1;
        }
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);
      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*cols;
      std::size_t sizeW = kc*Traits::WorkSpaceFactor;
      // Buffers come from the blocking object when pre-allocated, otherwise
      // they are stack/heap constructed by the macro.
      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW());
      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      // (==GEMM_VAR1)
      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;
        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack rhs's panel into a sequential chunk of memory (L2 caching)
        // Note that this panel will be read as many times as the number of blocks in the lhs's
        // vertical panel which is, in practice, a very low number.
        pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols);
        // For each mc x kc block of the lhs's vertical panel...
        // (==GEPP_VAR1)
        for(Index i2=0; i2<rows; i2+=mc)
        {
          const Index actual_mc = (std::min)(i2+mc,rows)-i2;
          // We pack the lhs's block into a sequential chunk of memory (L1 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro vertical panel of the large rhs's panel (e.g., cols/4 times).
          pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc);
          // Everything is packed, we can now call the block * panel kernel:
          gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW);
        }
      }
    }
  }
};
/*********************************************************************************
* Specialization of GeneralProduct<> for "large" GEMM, i.e.,
* implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/
// The GEMM product expression simply reuses the ProductBase traits.
template<typename Lhs, typename Rhs>
struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> >
 : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> >
{};
// Bundles the GEMM operands, destination, scaling factor and blocking sizes
// into a callable object so the kernel can be invoked (per thread-slice when
// parallelized) via operator().
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha,
               BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}
  // Pre-allocates the shared packed-rhs buffer (see level3_blocking::allocateB)
  // before the parallel workers start.
  void initParallelSession() const
  {
    m_blocking.allocateB();
  }
  // Runs the product on the [row, row+rows) x [col, col+cols) slice of the
  // destination; cols == -1 means "all remaining columns of the rhs".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();
    Gemm::run(rows, cols, m_lhs.cols(),
              /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }
  protected:
  const Lhs& m_lhs;
  const Rhs& m_rhs;
  Dest& m_dest;
  Scalar m_actualAlpha;
  BlockingType& m_blocking;
};
// Primary template; the FiniteAtCompileTime flag selects between the fully
// static (in-object buffers) and the dynamic (heap buffers) specializations
// defined below.
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
         bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;
// Base class holding the blocking sizes (mc, nc, kc) and the three packing
// buffers (A', B', and the gebp workspace). Ownership of the buffers is
// defined by the derived gemm_blocking_space classes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;
  protected:
    LhsScalar* m_blockA;   // packed lhs block A'
    RhsScalar* m_blockB;   // packed rhs panel B'
    RhsScalar* m_blockW;   // gebp kernel workspace
    DenseIndex m_mc;       // block size along M
    DenseIndex m_nc;       // block size along N
    DenseIndex m_kc;       // block size along K
  public:
    level3_blocking()
      : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0)
    {}
    inline DenseIndex mc() const { return m_mc; }
    inline DenseIndex nc() const { return m_nc; }
    inline DenseIndex kc() const { return m_kc; }
    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
    inline RhsScalar* blockW() { return m_blockW; }
};
// Fully-static blocking: all three dimensions are fixed at compile time, so
// the packing buffers are members of the object (no heap allocation) and the
// allocate*() hooks are no-ops.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      // A row-major destination is handled by transposing, hence the swap.
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth,
      SizeW = MaxDepth * Traits::WorkSpaceFactor
    };
    // In-object, 16-byte aligned packing buffers.
    EIGEN_ALIGN16 LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN16 RhsScalar m_staticB[SizeB];
    EIGEN_ALIGN16 RhsScalar m_staticW[SizeW];
  public:
    gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
      this->m_blockW = m_staticW;
    }
    // Buffers are members: nothing to allocate.
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateW() {}
    inline void allocateAll() {}
};
// Dynamic blocking: at least one dimension is only known at run time, so the
// packing buffers are lazily heap-allocated via the allocate*() hooks and
// released in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      // A row-major destination is handled by transposing, hence the swap.
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    DenseIndex m_sizeA;   // element counts, fixed in the constructor
    DenseIndex m_sizeB;
    DenseIndex m_sizeW;
  public:
    gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;
      // Shrink mc/nc/kc to cache-friendly sizes, then derive buffer sizes.
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
      m_sizeW = this->m_kc*Traits::WorkSpaceFactor;
    }
    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }
    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }
    void allocateW()
    {
      if(this->m_blockW==0)
        this->m_blockW = aligned_new<RhsScalar>(m_sizeW);
    }
    void allocateAll()
    {
      allocateA();
      allocateB();
      allocateW();
    }
    // NOTE(review): no copy constructor/assignment is declared; copying an
    // instance after allocation would double-free the buffers -- verify
    // instances are never copied.
    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
      aligned_delete(this->m_blockW, m_sizeW);
    }
};
} // end namespace internal
// High-level product expression for "large" GEMM; evaluation is deferred to
// scaleAndAddTo(), which dispatches to the blocked (and possibly parallel)
// general_matrix_matrix_product kernel.
template<typename Lhs, typename Rhs>
class GeneralProduct<Lhs, Rhs, GemmProduct>
  : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs>
{
    enum {
      // Compile-time bound on the inner (depth) dimension of the product.
      MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
    };
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct)
    typedef typename Lhs::Scalar LhsScalar;
    typedef typename Rhs::Scalar RhsScalar;
    typedef Scalar ResScalar;
    GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {
#if !(defined(EIGEN_NO_STATIC_ASSERT) && defined(EIGEN_NO_DEBUG))
      // Reject at compile time scalar types that cannot be multiplied together.
      typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp;
      EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar);
#endif
    }
    // Computes dst += alpha * lhs * rhs.
    template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const
    {
      eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols());
      // Empty operands contribute nothing; bail out early.
      if(m_lhs.cols()==0 || m_lhs.rows()==0 || m_rhs.cols()==0)
        return;
      // Strip transpose/conjugate/scalar-multiple wrappers from the operands...
      typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs);
      typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs);
      // ...and fold their scalar factors into a single effective alpha.
      Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                                 * RhsBlasTraits::extractScalarFactor(m_rhs);
      typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
              Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;
      typedef internal::gemm_functor<
        Scalar, Index,
        internal::general_matrix_matrix_product<
          Index,
          LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
          RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
          (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
        _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor;
      BlockingType blocking(dst.rows(), dst.cols(), lhs.cols());
      // Only enable the parallel path when the destination can be large enough
      // to amortize the threading overhead (max rows > 32 or dynamic).
      internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit);
    }
};
} // end namespace Eigen
#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
|
ll_load_utils.h | /*
* ll_load_utils.h
* LLAMA Graph Analytics
*
* Copyright 2014
* The President and Fellows of Harvard College.
*
* Copyright 2014
* Oracle Labs.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LL_LOAD_UTILS_H_
#define LL_LOAD_UTILS_H_
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unistd.h>
#include <deque>
#include <string>
#include <vector>
#include "llama/ll_config.h"
#include "llama/ll_streaming.h"
#include "llama/ll_external_sort.h"
#include "llama/loaders/ll_load_async_writable.h"
// High-level configuration
//#define LL_LOAD_CREATE_REV_EDGE_MAP
/**
* A stateless file loader prototype
*/
class ll_file_loader {
public:
    /**
     * Create a new instance of ll_file_loader
     */
    ll_file_loader() {}
    /**
     * Destroy the instance
     */
    virtual ~ll_file_loader() {}
    /**
     * Determine if this file can be opened by this loader
     *
     * @param file the file
     * @return true if it can be opened
     */
    virtual bool accepts(const char* file) = 0;
    /**
     * Load directly into the read-only representation by creating a new
     * level
     *
     * @param graph the graph
     * @param file the file
     * @param config the loader configuration
     */
    virtual void load_direct(ll_mlcsr_ro_graph* graph, const char* file,
            const ll_loader_config* config) = 0;
    /**
     * Load directly into the read-only representation by creating a new
     * level
     *
     * Default implementation: checkpoint the writable graph first (so the
     * read-only part is current), load the file into the read-only graph,
     * then fire the read-only-changed callback.
     *
     * @param graph the graph
     * @param file the file
     * @param config the loader configuration
     */
    virtual void load_direct(ll_writable_graph* graph, const char* file,
            const ll_loader_config* config) {
        graph->checkpoint(config);
        load_direct(&graph->ro_graph(), file, config);
        graph->callback_ro_changed();
    }
    /**
     * Load incrementally into the writable representation
     *
     * @param graph the graph
     * @param file the file
     * @param config the loader configuration
     */
    virtual void load_incremental(ll_writable_graph* graph, const char* file,
            const ll_loader_config* config) = 0;
    /**
     * Create a data source object for the given file
     *
     * @param file the file
     * @return the data source
     */
    virtual ll_data_source* create_data_source(const char* file) = 0;
};
/**
* A generic edge-list loader
*/
template <typename NodeType, bool HasWeight=false,
typename WeightType=float, int WeightTypeCode=LL_T_FLOAT>
class ll_edge_list_loader : public ll_data_source {
    /**
     * Item format for external sort - for out-edges
     */
    struct xs_edge {
        NodeType tail;
        NodeType head;
        // Zero-length when HasWeight is false (compiler extension), so
        // unweighted edges carry no per-edge weight storage.
        WeightType weight[HasWeight ? 1 : 0];
    };
    /**
     * Comparator for xs_edge: lexicographic order by (tail, head), i.e. the
     * order needed to emit out-edges grouped by source node.
     */
    struct xs_edge_comparator {
        bool operator() (const xs_edge& a, const xs_edge& b) {
            if (a.tail != b.tail)
                return a.tail < b.tail;
            else
                return a.head < b.head;
        }
    };
    /**
     * Item format for external sort - for in-edges
     */
    struct xs_in_edge {
        NodeType tail;
        NodeType head;
#ifdef LL_LOAD_CREATE_REV_EDGE_MAP
        // ID of the corresponding out-edge, kept to build the reverse edge map.
        edge_t out_edge;
#endif
    };
    /**
     * Comparator for xs_in_edge: lexicographic order by (head, tail), i.e. the
     * order needed to emit in-edges grouped by destination node.
     */
    struct xs_in_edge_comparator {
        bool operator() (const xs_in_edge& a, const xs_in_edge& b) {
            if (a.head != b.head)
                return a.head < b.head;
            else
                return a.tail < b.tail;
        }
    };
private:
/// True if the data file has still potentially more data left in it
volatile bool _has_more;
/// The last value of _has_more
volatile bool _last_has_more;
public:
    /**
     * Create an instance of class ll_edge_list_loader
     */
    ll_edge_list_loader() {
        // Assume the source has data until reading proves otherwise.
        _has_more = true;
        _last_has_more = _has_more;
    }
    /**
     * Destroy the loader. No resources are owned at this level, so the body
     * is intentionally empty; subclasses release their own input streams.
     */
    virtual ~ll_edge_list_loader() {
    }
protected:
/**
* Read the next edge
*
* @param o_tail the output for tail
* @param o_head the output for head
* @param o_weight the output for weight (ignore if HasWeight is false)
* @return true if the edge was loaded, false if EOF or error
*/
virtual bool next_edge(NodeType* o_tail, NodeType* o_head,
WeightType* o_weight) = 0;
/**
* Rewind the input file
*/
virtual void rewind() = 0;
    /**
     * Get graph stats if they are available
     *
     * @param o_nodes the output for the number of nodes (1 + max node ID)
     * @param o_edges the output for the number of edges
     * @return true if succeeded, or false if not or the info is not available
     */
    virtual bool stat(size_t* o_nodes, size_t* o_edges) {
        // Default: stats are not available; subclasses that know the counts
        // up front override this.
        return false;
    }
public:
    /**
     * Is this a simple data source?
     *
     * @return always true for edge-list loaders
     */
    virtual bool simple() {
        return true;
    }
/**
* Get the next edge
*
* @param o_tail the output for tail
* @param o_head the output for head
* @return true if the edge was loaded, false if EOF or error
*/
virtual bool next_edge(node_t* o_tail, node_t* o_head) {
NodeType t = 0;
NodeType h = 0;
WeightType w;
bool r = next_edge(&t, &h, &w);
*o_tail = t;
*o_head = h;
return r;
}
    /**
     * Return true if the data file has potentially more data in it
     *
     * Note: _has_more is declared volatile above — presumably it can be
     * updated while another thread polls this flag (TODO confirm with the
     * loading code).
     *
     * @return true if it has more data left
     */
    virtual bool has_more() {
        return _has_more;
    }
/**
* Load the graph directly into the read-only representation
*
* @param graph the graph
* @param config the loader configuration
* @return true on no error
*/
bool load_direct(ll_mlcsr_ro_graph* graph,
const ll_loader_config* config) {
// Check if we have stat and if we can load the data just using the
// info stat gives us.
// Specifically, we need to be able to get a reasonable estimate of the
// max size of the edge table, which gets tricky on levels > 0 when
// copying adjacency lists or deleting using continuations. Loading
// levels > 0 with continuations should work, but it will result in
// reserving space for all continuations, which will be a big waste of
// space in many (most?) cases, so disable it for now (we should
// reenable it when we implement variable-sized edge tables or shrinking
// of edge tables).
// TODO Avoid calling stat twice
size_t new_level = graph->num_levels();
size_t max_nodes = 0;
size_t max_edges = 0;
if (IF_LL_MLCSR_CONTINUATIONS(new_level == 0 &&)
stat(&max_nodes, &max_edges)) {
return load_direct_with_stat(graph, config);
}
LL_D_PRINT("Load without stat, level=%lu\n", new_level);
// Check features
feature_vector_t features;
features << LL_L_FEATURE(lc_direction);
features << LL_L_FEATURE(lc_reverse_edges);
features << LL_L_FEATURE(lc_deduplicate);
features << LL_L_FEATURE(lc_no_properties);
config->assert_features(false /*direct*/, true /*error*/, features);
// Initialize
bool print_progress = config->lc_print_progress;
bool reverse = config->lc_reverse_edges;
bool load_weight = !config->lc_no_properties;
xs_edge e;
WeightType _w; (void) _w; // Unused, here for memory safety
if (new_level > 0) {
if (max_nodes < (size_t) graph->out().max_nodes()) {
max_nodes = graph->out().max_nodes();
}
}
// Initialize external sort
ll_external_sort<xs_edge, xs_edge_comparator>* out_sort = NULL;
// Get the degrees
size_t degrees_capacity = 80 * 1000ul * 1000ul;
degree_t* degrees_out = NULL;
degree_t* degrees_in = NULL;
degrees_out = (degree_t*) malloc(sizeof(*degrees_out)*degrees_capacity);
memset(degrees_out, 0, sizeof(*degrees_out) * degrees_capacity);
if (reverse) {
degrees_in = (degree_t*) malloc(
sizeof(*degrees_in) * degrees_capacity);
memset(degrees_in, 0, sizeof(*degrees_in) * degrees_capacity);
}
/*
* PASS 1
* - Determine the node degrees
* - Feed the edges to the external sort (if not already sorted)
*/
NodeType last_tail = 0;
NodeType last_head = 0;
bool already_sorted = true;
bool out_called_sort = false;
if (config->lc_direction == LL_L_UNDIRECTED_DOUBLE) {
already_sorted = false;
out_sort = new ll_external_sort<xs_edge,
xs_edge_comparator>(config);
}
size_t step = 10 * 1000 * 1000ul;
if (print_progress) {
fprintf(stderr, "[<]");
}
// XXX This split should be done instead if
// config->lc_direction == LL_L_UNDIRECTED_DOUBLE,
// since that's guaranteed to mess up the sort order,
// while config->lc_deduplicate does not necessarily
// mess up things
if (config->lc_deduplicate) {
if (already_sorted) {
already_sorted = false;
out_sort = new ll_external_sort<xs_edge,
xs_edge_comparator>(config);
}
while (next_edge(&e.tail, &e.head, &e.weight[0])) {
max_edges++;
if (config->lc_direction == LL_L_UNDIRECTED_ORDERED) {
if (e.tail > e.head) {
unsigned x = e.tail; e.tail = e.head; e.head = x;
}
}
#ifndef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
// Need to preserve count if LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
if (last_head == e.head && last_tail == e.tail) {
continue;
}
#endif
last_head = e.head;
last_tail = e.tail;
if (e.tail >= (NodeType) max_nodes) max_nodes = e.tail + 1;
if (e.head >= (NodeType) max_nodes) max_nodes = e.head + 1;
*out_sort << e;
if (config->lc_direction == LL_L_UNDIRECTED_DOUBLE) {
if (e.tail != e.head) {
unsigned x = e.tail; e.tail = e.head; e.head = x;
*out_sort << e;
max_edges++;
}
}
if (print_progress) {
if (max_edges % step == 0) {
fprintf(stderr, ".");
if (max_edges % (step * 10) == 0) {
fprintf(stderr, "%lu", max_edges / 1000000ul);
}
}
}
}
if (max_nodes > degrees_capacity) {
size_t d = max_nodes;
degree_t* x = (degree_t*) realloc(degrees_out,
sizeof(*degrees_out)*d);
memset(&x[degrees_capacity], 0,
sizeof(*x)*(d-degrees_capacity));
degrees_out = x;
if (reverse) {
x = (degree_t*) realloc(degrees_in, sizeof(*degrees_in)*d);
memset(&x[degrees_capacity], 0,
sizeof(*x)*(d-degrees_capacity));
degrees_in = x;
}
degrees_capacity = d;
}
out_sort->sort();
out_called_sort = true;
xs_edge* buffer;
size_t length;
size_t index = 0;
last_tail = LL_NIL_NODE;
last_head = LL_NIL_NODE;
if (print_progress) {
fprintf(stderr, "[+]");
}
while (out_sort->next_block(&buffer, &length)) {
while (length --> 0) {
#ifndef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
if (last_head == buffer->head
&& last_tail == buffer->tail) {
buffer++;
continue;
}
degrees_out[buffer->tail]++;
if (reverse) degrees_in[buffer->head]++;
#else
if (last_head != buffer->head
|| last_tail != buffer->tail) {
degrees_out[buffer->tail]++;
if (reverse) degrees_in[buffer->head]++;
}
#endif
last_head = buffer->head;
last_tail = buffer->tail;
buffer++;
index++;
if (print_progress) {
if (index % step == 0) {
fprintf(stderr, ".");
if (index % (step * 10) == 0) {
fprintf(stderr, "%lu", index / 1000000ul);
}
}
}
}
}
}
else /* if (!config->lc_deduplicate) */ {
size_t loaded_edges = 0;
while (next_edge(&e.tail, &e.head, &e.weight[0])) {
max_edges++;
loaded_edges++;
if (config->lc_direction == LL_L_UNDIRECTED_ORDERED) {
if (e.tail > e.head) {
unsigned x = e.tail; e.tail = e.head; e.head = x;
}
}
if (already_sorted) {
if (last_tail > e.tail
|| (last_tail == e.tail && last_head > e.head)) {
already_sorted = false;
loaded_edges = 0;
rewind();
max_edges = 0;
memset(degrees_out, 0, sizeof(*degrees_out)
* degrees_capacity);
if (reverse) {
memset(degrees_in, 0, sizeof(*degrees_in)
* degrees_capacity);
}
out_sort = new ll_external_sort<xs_edge,
xs_edge_comparator>(config);
continue;
}
}
last_head = e.head;
last_tail = e.tail;
if (e.tail >= (NodeType) max_nodes) max_nodes = e.tail + 1;
if (e.head >= (NodeType) max_nodes) max_nodes = e.head + 1;
if (max_nodes > degrees_capacity) {
size_t d = degrees_capacity;
while (d < max_nodes + 16) d *= 2;
degree_t* x = (degree_t*) realloc(degrees_out,
sizeof(*degrees_out)*d);
memset(&x[degrees_capacity], 0,
sizeof(*x)*(d-degrees_capacity));
degrees_out = x;
if (reverse) {
x = (degree_t*) realloc(degrees_in,
sizeof(*degrees_in)*d);
memset(&x[degrees_capacity], 0,
sizeof(*x)*(d-degrees_capacity));
degrees_in = x;
}
degrees_capacity = d;
}
degrees_out[e.tail]++;
if (reverse) degrees_in[e.head]++;
if (!already_sorted) {
*out_sort << e;
}
if (config->lc_direction == LL_L_UNDIRECTED_DOUBLE) {
if (e.tail != e.head) {
unsigned x = e.tail; e.tail = e.head; e.head = x;
max_edges++;
degrees_out[e.tail]++;
if (reverse) degrees_in[e.head]++;
if (!already_sorted) {
*out_sort << e;
}
}
}
if (print_progress) {
if (loaded_edges % step == 0) {
fprintf(stderr, ".");
if (loaded_edges % (step * 10) == 0) {
fprintf(stderr, "%lu", loaded_edges / 1000000ul);
}
}
}
} /* Ends: while(...) { ... } */
} /* Ends: if (config->lc_deduplicate) { ... } else { ... } */
/*
* PASS 2
* - Write out the level, either by re-reading the input file if it
* is sorted or by pulling them out of external sort
* - Feed the edges into the in-edges external sort
*/
// Create the out-edges level
auto& out = graph->out();
out.init_level_from_degrees(max_nodes, degrees_out, NULL);
LL_ET<node_t>* et = graph->out().edge_table(new_level);
auto* vt = out.vertex_table(new_level); (void) vt;
// If the out-to-in, in-to-out properties are not enabled, disable
// that feature in the corresponding ll_csr_base
if (!config->lc_reverse_edges || !config->lc_reverse_maps) {
graph->out().set_edge_translation(false);
graph->in().set_edge_translation(false);
}
// Initialize the weight property
ll_mlcsr_edge_property<WeightType>* prop_weight = NULL;
if (load_weight) prop_weight = init_prop_weight(graph);
// Initialize all other edge properties
ll_with(auto p = graph->get_all_edge_properties_32()) {
for (auto it = p.begin(); it != p.end(); it++) {
if ((void*) it->second == (void*) prop_weight) continue;
it->second->cow_init_level(out.max_edges(new_level));
}
}
ll_with(auto p = graph->get_all_edge_properties_64()) {
for (auto it = p.begin(); it != p.end(); it++) {
if ((void*) it->second == (void*) prop_weight) continue;
it->second->cow_init_level(out.max_edges(new_level));
}
}
// Initialize streaming weights
#ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
ll_mlcsr_edge_property<uint32_t>* streaming_weight
= graph->get_edge_weights_streaming();
assert(streaming_weight != NULL);
#ifndef LL_S_SINGLE_SNAPSHOT
ll_mlcsr_edge_property<edge_t>* streaming_forward
= graph->get_edge_forward_streaming();
assert(streaming_forward != NULL);
#endif
#endif
// Initialize the external sort for the in-edges
ll_external_sort<xs_in_edge, xs_in_edge_comparator>* in_sort = NULL;
if (reverse) {
in_sort = new ll_external_sort<xs_in_edge,
xs_in_edge_comparator>(config);
}
// Write the out-edges
if (print_progress) {
fprintf(stderr, "[O]");
}
if (already_sorted) {
assert(config->lc_direction != LL_L_UNDIRECTED_DOUBLE);
assert(!config->lc_deduplicate); // XXX
rewind();
last_head = LL_NIL_NODE;
last_tail = LL_NIL_NODE;
size_t index = 0;
while (next_edge(&e.tail, &e.head, &e.weight[0])) {
if (config->lc_direction == LL_L_UNDIRECTED_ORDERED) {
if (e.tail > e.head) {
unsigned x = e.tail; e.tail = e.head; e.head = x;
}
}
if (config->lc_deduplicate && last_head == e.head
&& last_tail == e.tail) {
continue;
}
#ifdef LL_MLCSR_CONTINUATIONS
if (last_tail != e.tail) {
auto& vt_value = (*vt)[e.tail];
assert(LL_EDGE_LEVEL(vt_value.adj_list_start)
== new_level);
index = LL_EDGE_INDEX(vt_value.adj_list_start);
}
#endif
last_head = e.head;
last_tail = e.tail;
(*et)[index] = LL_VALUE_CREATE((node_t) e.head);
if (HasWeight && load_weight) {
edge_t edge = LL_EDGE_CREATE(new_level, index);
prop_weight->cow_write(edge, e.weight[0]);
}
if (reverse) {
xs_in_edge x;
x.head = e.head;
x.tail = e.tail;
#ifdef LL_LOAD_CREATE_REV_EDGE_MAP
x.out_edge = edge;
#endif
*in_sort << x;
}
index++;
if (print_progress) {
if (index % step == 0) {
fprintf(stderr, ".");
if (index % (step * 10) == 0) {
fprintf(stderr, "%lu", index / 1000000ul);
}
}
}
}
#ifndef LL_MLCSR_CONTINUATIONS
assert(index == max_edges);
#endif
}
else /* if (!already_sorted) */ {
if (out_called_sort)
out_sort->rewind_sorted();
else
out_sort->sort();
xs_edge* buffer;
size_t length;
size_t index = 0;
size_t num_duplicates = 0;
last_tail = LL_NIL_NODE;
last_head = LL_NIL_NODE;
while (out_sort->next_block(&buffer, &length)) {
while (length --> 0) {
if (config->lc_deduplicate && last_head == buffer->head
&& last_tail == buffer->tail) {
buffer++;
num_duplicates++;
#ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
edge_t edge = LL_EDGE_CREATE(new_level, index-1);
uint32_t old_weight = (*streaming_weight)[edge];
streaming_weight->cow_write(edge, old_weight + 1);
LL_D_NODE2_PRINT(buffer->tail, buffer->head,
"Update duplicate edge %lx: %lu --> %lu, "
"weight %u ==> %u\n",
edge, (size_t) buffer->tail,
(size_t) buffer->head,
old_weight,
old_weight + 1);
#endif
continue;
}
#ifdef LL_MLCSR_CONTINUATIONS
if (last_tail != buffer->tail) {
auto& vt_value = (*vt)[buffer->tail];
assert(LL_EDGE_LEVEL(vt_value.adj_list_start)
== new_level);
index = LL_EDGE_INDEX(vt_value.adj_list_start);
}
#endif
last_head = buffer->head;
last_tail = buffer->tail;
(*et)[index] = LL_VALUE_CREATE((node_t) buffer->head);
#ifdef LL_STREAMING
// Deal with duplicates
if (config->lc_deduplicate) {
edge_t old = new_level == 0 ? LL_NIL_EDGE
: out.find(buffer->tail, buffer->head,
new_level-1, new_level-1);
if (old != LL_NIL_EDGE) {
graph->update_max_visible_level_lower_only(old,
new_level);
# ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
uint32_t old_weight = (*streaming_weight)[old];
edge_t edge = LL_EDGE_CREATE(new_level, index);
streaming_weight->cow_write(edge,
old_weight + (uint32_t) num_duplicates + 1);
# ifndef LL_S_SINGLE_SNAPSHOT
streaming_forward->cow_write(old, edge);
# endif
LL_D_NODE2_PRINT(buffer->tail, buffer->head,
"Found a duplicate of %lx: %lu --> %lu, "
"weight %u ==> %u\n",
old, (size_t) buffer->tail,
(size_t) buffer->head,
old_weight,
old_weight + (uint32_t) num_duplicates + 1);
# else
LL_D_NODE2_PRINT(buffer->tail, buffer->head,
"Found a duplicate of %lx: %lu --> %lu\n",
old, (size_t) buffer->tail,
(size_t) buffer->head);
# endif
}
else {
# ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
edge_t edge = LL_EDGE_CREATE(new_level, index);
streaming_weight->cow_write(edge,
(uint32_t) num_duplicates + 1);
LL_D_NODE2_PRINT(buffer->tail, buffer->head,
"Add %llx: %lu --> %lu, weight = %u\n",
LL_EDGE_CREATE(new_level, index),
(size_t) buffer->tail,
(size_t) buffer->head,
(uint32_t) num_duplicates + 1);
# endif
}
}
#endif
LL_D_NODE2_PRINT(buffer->tail, buffer->head,
"Add %llx: %lu --> %lu\n",
LL_EDGE_CREATE(new_level, index),
(size_t) buffer->tail,
(size_t) buffer->head);
if (HasWeight && load_weight) {
edge_t edge = LL_EDGE_CREATE(new_level, index);
prop_weight->cow_write(edge, e.weight[0]);
}
if (reverse) {
xs_in_edge x;
x.head = buffer->head;
x.tail = buffer->tail;
#ifdef LL_LOAD_CREATE_REV_EDGE_MAP
x.out_edge = edge;
#endif
*in_sort << x;
}
index++;
buffer++;
num_duplicates = 0;
if (print_progress) {
if (index % step == 0) {
fprintf(stderr, ".");
if (index % (step * 10) == 0) {
fprintf(stderr, "%lu", index / 1000000ul);
}
}
}
}
}
delete out_sort;
out_sort = NULL;
} /* Ends: if (already_sorted) { ... } else { ... } */
graph->out().finish_level_edges();
if (HasWeight && load_weight) {
prop_weight->finish_level();
}
/*
* PASS 3
* - Compute the in-edges, if applicable
*/
// Do the in-edges
if (reverse) {
if (print_progress) {
fprintf(stderr, "[I]");
}
graph->in().init_level_from_degrees(max_nodes, degrees_in, NULL);
et = graph->in().edge_table(new_level);
vt = graph->in().vertex_table(new_level); (void) vt;
// If the out-to-in, in-to-out properties are not enabled, disable
// that feature in the corresponding ll_csr_base
if (!config->lc_reverse_edges || !config->lc_reverse_maps) {
graph->in().set_edge_translation(false);
}
// Sort the in edges and load them
in_sort->sort();
xs_in_edge* buffer;
size_t length;
size_t index = 0;
last_head = LL_NIL_NODE;
last_tail = LL_NIL_NODE;
while (in_sort->next_block(&buffer, &length)) {
while (length --> 0) {
#ifdef LL_MLCSR_CONTINUATIONS
if (last_head != buffer->head) {
auto& vt_value = (*vt)[buffer->head];
assert(LL_EDGE_LEVEL(vt_value.adj_list_start)
== new_level);
index = LL_EDGE_INDEX(vt_value.adj_list_start);
}
#endif
last_head = buffer->head;
last_tail = buffer->tail;
(*et)[index] = LL_VALUE_CREATE((node_t) buffer->tail);
// TODO Do the out-to-in, in-to-out properties if desired
index++;
buffer++;
if (print_progress) {
if (index % step == 0) {
fprintf(stderr, ".");
if (index % (step * 10) == 0) {
fprintf(stderr, "%lu", index / 1000000ul);
}
}
}
}
}
delete in_sort;
in_sort = NULL;
graph->in().finish_level_edges();
}
// Finish
if (reverse) free(degrees_in);
free(degrees_out);
_last_has_more = _has_more;
_has_more = false;
// Finish node properties
{
auto p = graph->get_all_node_properties_32();
for (auto it = p.begin(); it != p.end(); it++) {
if (!it->second->writable())
it->second->writable_init(max_nodes);
it->second->freeze(max_nodes);
if (it->second->max_level() != out.max_level()) {
fflush(stdout);
fprintf(stderr, "\nASSERT FAILED: Node property checkpoint "
"for '%s': %d level(s), %d expected\n",
it->first.c_str(), it->second->max_level(),
out.max_level());
exit(1);
}
assert(it->second->max_level() == out.max_level());
}
}
{
auto p = graph->get_all_node_properties_64();
for (auto it = p.begin(); it != p.end(); it++) {
if (!it->second->writable())
it->second->writable_init(max_nodes);
it->second->freeze(max_nodes);
assert(it->second->max_level() == out.max_level());
}
}
// Finish edge properties - finish the levels
{
auto p = graph->get_all_edge_properties_32();
for (auto it = p.begin(); it != p.end(); it++) {
if (it->second->writable())
it->second->freeze();
else
it->second->cow_finish_level();
if (it->second->max_level() != out.max_level()) {
fflush(stdout);
fprintf(stderr, "\nASSERT FAILED: Edge property checkpoint "
"for '%s': %d level(s), %d expected\n",
it->first.c_str(), it->second->max_level(),
out.max_level());
exit(1);
}
assert(it->second->max_level() == out.max_level());
}
}
{
auto p = graph->get_all_edge_properties_64();
for (auto it = p.begin(); it != p.end(); it++) {
if (it->second->writable())
it->second->freeze();
else
it->second->cow_finish_level();
assert(it->second->max_level() == out.max_level());
}
}
return true;
}
/**
 * Load the data into one or more queues of requests
 *
 * Reads edges until EOF or until lc_max_edges edges have been read, wraps
 * each edge in an add-edge request, and stripes the requests across the
 * queues by the edge's tail node.
 *
 * @param request_queues the request queues
 * @param num_stripes the number of stripes (queues array length)
 * @param config the loader configuration
 * @return true if there are more edges to load
 */
bool load_to_request_queues(ll_la_request_queue** request_queues,
		size_t num_stripes, const ll_loader_config* config) {

	// Check features

	feature_vector_t features;
	features << LL_L_FEATURE(lc_max_edges);
	features << LL_L_FEATURE(lc_no_properties);
	config->assert_features(false /*direct*/, true /*error*/, features);

	// Initializie

	size_t max_edges = 0;	// edges read so far in this batch
	size_t chunk_size = config->lc_max_edges;	// 0 = unlimited
	bool load_weight = !config->lc_no_properties;

	xs_edge e;
	WeightType _w; (void) _w; // Unused, here for memory safety
	bool has_more;

	while ((has_more = next_edge(&e.tail, &e.head, &e.weight[0]))) {
		max_edges++;

		LL_D_NODE2_PRINT(e.tail, e.head, "%u --> %u\n", (unsigned) e.tail,
				(unsigned) e.head);

		ll_la_request_with_edge_properties* request;

		// NOTE(review): weights are currently dropped on this path -- the
		// branch below is an unimplemented stub (request carries no weight)
		if (HasWeight && load_weight) {
			// XXX
			//LL_NOT_IMPLEMENTED;
		}

#ifdef LL_S_WEIGHTS_INSTEAD_OF_DUPLICATE_EDGES
		request = new ll_la_add_edge_for_streaming_with_weights
			<node_t>((node_t) e.tail, (node_t) e.head);
#else
		request = new ll_la_add_edge
			<node_t>((node_t) e.tail, (node_t) e.head);
#endif

		// Stripe by the tail's page so nearby nodes land in the same queue
		size_t stripe = (e.tail >> (LL_ENTRIES_PER_PAGE_BITS+3))
			% num_stripes;
		request_queues[stripe]->enqueue(request);

		if (chunk_size > 0)
			if (max_edges % chunk_size == 0) break;
	}

	return has_more;
}
/**
 * Load the graph into the writable representation
 *
 * Loads one batch of edges inside a single transaction: OpenMP thread 0
 * fills the per-stripe request queues while the remaining threads drain
 * them into the graph.
 *
 * @param graph the graph
 * @param config the loader configuration
 * @return true on no error
 */
bool load_incremental(ll_writable_graph* graph,
		const ll_loader_config* config) {

	// Check features

	feature_vector_t features;
	features << LL_L_FEATURE(lc_max_edges);
	features << LL_L_FEATURE(lc_no_properties);
	config->assert_features(false /*direct*/, true /*error*/, features);

	// Initializie
	// (one request queue per available OpenMP thread)

	size_t num_stripes = omp_get_max_threads();
	ll_la_request_queue* request_queues[num_stripes];
	for (size_t i = 0; i < num_stripes; i++) {
		request_queues[i] = new ll_la_request_queue();
	}

	LL_D_PRINT("Initialize\n");

	// TODO Deduplicate? Unordered?
	// TODO Create nodes

	bool has_more = true;
	while (has_more) {
		graph->tx_begin();

		// Keep the queues open while the producer is still filling them
		for (size_t i = 0; i < num_stripes; i++)
			request_queues[i]->shutdown_when_empty(false);

#pragma omp parallel
		{
			if (omp_get_thread_num() == 0) {
				// Producer: read the next batch into the queues...
				has_more = this->load_to_request_queues(request_queues,
						num_stripes, config);

				// Add a worker
				// ...then close the queues and help drain them
				for (size_t i = 0; i < num_stripes; i++)
					request_queues[i]->shutdown_when_empty();
				for (size_t i = 0; i < num_stripes; i++)
					request_queues[i]->run(*graph);
			}
			else {
				// Consumers: start each worker on a different stripe
				int t = omp_get_thread_num();
				for (size_t i = 0; i < num_stripes; i++, t++)
					request_queues[t % num_stripes]->worker(*graph);
			}
		}
		graph->tx_commit();

		// NOTE(review): breaking when has_more is TRUE (and falling out of
		// the while when it is false) makes this loop run exactly once per
		// call -- presumably the intended "one batch per pull" behavior,
		// but then the while is dead; confirm against the pull() callers.
		if (has_more) break;
	}

	_last_has_more = _has_more;
	_has_more = has_more;

	for (size_t i = 0; i < num_stripes; i++) delete request_queues[i];
	return true;
}
/**
 * Load the next batch of data
 *
 * @param graph the writable graph
 * @param max_edges the maximum number of edges
 * @return true if data was loaded, false if there are no more data
 */
virtual bool pull(ll_writable_graph* graph, size_t max_edges) {

	ll_loader_config batch_config;
	batch_config.lc_max_edges = max_edges;

	bool ok = load_incremental(graph, &batch_config);
	if (!ok) abort();

	return _last_has_more;
}
/**
 * Load the next batch of data to request queues
 *
 * @param request_queues the request queues
 * @param num_stripes the number of stripes (queues array length)
 * @param max_edges the maximum number of edges
 * @return true if data was loaded, false if there are no more data
 */
virtual bool pull(ll_la_request_queue** request_queues, size_t num_stripes,
		size_t max_edges) {

	ll_loader_config batch_config;
	batch_config.lc_max_edges = max_edges;

	// Fill the queues, then shift the "has more" state by one batch
	bool more = load_to_request_queues(request_queues, num_stripes,
			&batch_config);

	_last_has_more = _has_more;
	_has_more = more;
	return _last_has_more;
}
private:
/**
 * Initialize the weights property (if applicable)
 *
 * Looks up the "weight" edge property of the width matching WeightType
 * (32 or 64 bits), creating it if it does not exist yet, and opens a
 * copy-on-write level sized to the new edge table.
 *
 * @param graph the graph
 * @return the property, or NULL if not applicable
 */
ll_mlcsr_edge_property<WeightType>* init_prop_weight(
		ll_mlcsr_ro_graph* graph) {

	if (!HasWeight) return NULL;

	size_t level = graph->out().num_levels() - 1;
	size_t length = graph->out().edge_table_length(level);

	ll_mlcsr_edge_property<WeightType>* w = NULL;

	if (sizeof(WeightType) == 4) {
		w = reinterpret_cast<ll_mlcsr_edge_property<WeightType>*>
			(graph->get_edge_property_32("weight"));
		if (w == NULL) {
			// Not there yet -- create it and make sure it has enough levels
			w = reinterpret_cast<ll_mlcsr_edge_property<WeightType>*>
				(graph->create_uninitialized_edge_property_32
				 ("weight", WeightTypeCode));
			w->ensure_min_levels(level, length);
		}
	}
	else {
		if (sizeof(WeightType) != 8) abort();
		w = reinterpret_cast<ll_mlcsr_edge_property<WeightType>*>
			(graph->get_edge_property_64("weight"));
		if (w == NULL) {
			w = reinterpret_cast<ll_mlcsr_edge_property<WeightType>*>
				(graph->create_uninitialized_edge_property_64
				 ("weight", WeightTypeCode));
			w->ensure_min_levels(level, length);
		}
	}

	w->cow_init_level(length);
	return w;
}
/**
 * Write a node with its out-edges and prep for the in-edges
 *
 * @param graph the graph
 * @param et the edge table
 * @param new_level the new level
 * @param node the node
 * @param adj_list the adjacency list
 * @param weights the weights (if applicable)
 * @param prop_weight the weights property (if applicable)
 * @param in_sort the in-edges sorter (if applicable)
 */
void load_node_out(ll_mlcsr_ro_graph* graph, LL_ET<node_t>* et, size_t new_level,
		node_t node, std::vector<NodeType>& adj_list,
		std::vector<WeightType>& weights,
		ll_mlcsr_edge_property<WeightType>* prop_weight,
		ll_external_sort<xs_in_edge, xs_in_edge_comparator>* in_sort) {

	// Reserve the node's adjacency-list slots; et_index is where they start
	size_t et_index = graph->out().init_node(node, adj_list.size(), 0);
	edge_t edge = LL_EDGE_CREATE(new_level, et_index);

	// Write the out-edges into the edge table
	for (size_t i = 0; i < adj_list.size(); i++) {
		LL_D_NODE2_PRINT(node, adj_list[i], "%ld --> %ld\n",
				(long) node, (long) adj_list[i]);
		(*et)[et_index + i] = LL_VALUE_CREATE((node_t) adj_list[i]);
	}

	// Feed the reversed edges into the in-edge external sort, if requested
	if (in_sort != NULL) {
		xs_in_edge x;
		x.tail = node;
		for (size_t i = 0; i < adj_list.size(); i++) {
			x.head = adj_list[i];
#ifdef LL_LOAD_CREATE_REV_EDGE_MAP
			x.out_edge = edge + i;	// back-reference to the out-edge
#endif
			*in_sort << x;
		}
	}

	// Write the edge weights, if the graph has them.
	// NOTE(review): assumes weights.size() tracks adj_list.size() when
	// weights are loaded -- callers maintain both buffers in lockstep.
	if (HasWeight) {
		for (size_t i = 0; i < weights.size(); i++) {
			prop_weight->cow_write(edge + i, weights[i]);
		}
	}
}
/**
 * Write a node with its in-edges
 *
 * @param graph the graph
 * @param et the edge table
 * @param new_level the new level (unused; kept for symmetry with load_node_out)
 * @param node the node
 * @param adj_list the adjacency list
 */
void load_node_in(ll_mlcsr_ro_graph* graph, LL_ET<node_t>* et, size_t new_level,
		node_t node, std::vector<NodeType>& adj_list) {

	const size_t count = adj_list.size();
	size_t base = graph->in().init_node(node, count, 0);

	for (size_t k = 0; k < count; k++)
		(*et)[base + k] = LL_VALUE_CREATE((node_t) adj_list[k]);
}
/**
 * Load the graph directly into the read-only representation for the case
 * in which the ll_edge_list_loader::stat() info is readily available
 *
 * Pass 1 streams the input in order if it is already sorted by (tail, head);
 * on the first out-of-order edge it rewinds and falls back to an external
 * sort.  The in-edges, if requested, are always built via a second external
 * sort.
 *
 * Fixes vs. the previous version:
 *  - the unsorted-CSR path pushed e.weight[0] (the last edge read in the
 *    *reading* phase) instead of buffer->weight[0] (the sorted record),
 *    corrupting all weights on the external-sort path;
 *  - the in-edge pass never incremented loaded_edges, so the progress code
 *    (0 % step == 0) printed a dot for every single edge.
 *
 * @param graph the graph
 * @param config the loader configuration
 * @return true on no error
 */
bool load_direct_with_stat(ll_mlcsr_ro_graph* graph,
		const ll_loader_config* config) {

	// Check features

	feature_vector_t features;
	features << LL_L_FEATURE(lc_direction);
	features << LL_L_FEATURE(lc_reverse_edges);
	features << LL_L_FEATURE(lc_deduplicate);
	features << LL_L_FEATURE(lc_no_properties);
	features << LL_L_FEATURE(lc_max_edges);
	config->assert_features(false /*direct*/, true /*error*/, features);

	// Initialize the algorithm

	bool print_progress = config->lc_print_progress;
	bool reverse = config->lc_reverse_edges;
	bool load_weight = !config->lc_no_properties;

	size_t new_level = graph->num_levels();
	size_t max_nodes = 0;
	size_t max_edges = 0;

	xs_edge e;
	WeightType _w; (void) _w;	// Unused, here for memory safety

	if (!stat(&max_nodes, &max_edges)) {
		LL_E_PRINT("The graph stat call failed\n");
		abort();
	}

	if (config->lc_max_edges > 0 && max_edges > config->lc_max_edges) {
		max_edges = config->lc_max_edges;
	}

	if (new_level > 0) {
		if (max_nodes < (size_t) graph->out().max_nodes()) {
			max_nodes = graph->out().max_nodes();
		}
	}

	if (config->lc_direction == LL_L_UNDIRECTED_DOUBLE) {
		max_edges *= 2;
	}

	// Initialize the new CSR level

	graph->partial_init_level(max_nodes, max_nodes, max_edges);
	LL_ET<node_t>* et = graph->out().edge_table(new_level);
	LL_D_PRINT("Nodes = %lu, edges = %lu\n", max_nodes, max_edges);

	ll_mlcsr_edge_property<WeightType>* prop_weight = NULL;
	if (load_weight) prop_weight = init_prop_weight(graph);

	// If the out-to-in, in-to-out properties are not enabled, disable
	// that feature in the corresponding ll_csr_base

	if (!config->lc_reverse_edges || !config->lc_reverse_maps) {
		graph->out().set_edge_translation(false);
		graph->in().set_edge_translation(false);
	}

	// Initialize the in-edges

	ll_external_sort<xs_in_edge, xs_in_edge_comparator>* in_sort = NULL;
	if (reverse) {
		graph->partial_init_level_in(max_nodes, max_nodes, max_edges);
		in_sort = new ll_external_sort<xs_in_edge,
				 xs_in_edge_comparator>(config);
	}

	/*
	 *
	 * CASE 1: The input file is sorted
	 *
	 */

	// Try to load the data if it is sorted -- or discover that it is not,
	// abort, and then try again with the external sort

	size_t loaded_edges = 0;
	bool was_sorted = false;
	size_t step = 10 * 1000 * 1000ul;

	if (print_progress) {
		fprintf(stderr, "[<]");
	}

	if (config->lc_direction != LL_L_UNDIRECTED_DOUBLE) {

		std::vector<NodeType> adj_list_buffer;
		std::vector<WeightType> weight_buffer;

		NodeType last_tail = (NodeType) LL_NIL_NODE;
		NodeType last_head = (NodeType) LL_NIL_NODE;

		was_sorted = true;

		while (next_edge(&e.tail, &e.head, &e.weight[0])) {
			loaded_edges++;
			if (config->lc_max_edges > 0
					&& loaded_edges > config->lc_max_edges) {
				break;
			}

			LL_D_NODE2_PRINT(e.tail, e.head, "%ld --> %ld\n", (long) e.tail,
					(long) e.head);

			if (config->lc_direction == LL_L_UNDIRECTED_ORDERED) {
				if (e.tail > e.head) {
					unsigned x = e.tail; e.tail = e.head; e.head = x;
				}
			}

			if (config->lc_deduplicate && last_head == e.head
					&& last_tail == e.tail) {
				continue;
			}

			// Out-of-order edge: abandon pass 1 and fall back to CASE 2

			if ((last_tail != (NodeType) LL_NIL_NODE && last_tail > e.tail)
					|| (last_tail == e.tail && last_head > e.head)) {
				LL_D_PRINT("The input file is not sorted\n");
				was_sorted = false;
				loaded_edges = 0;
				rewind();
				graph->out().restart_init_level();
				if (in_sort != NULL) in_sort->clear();
				break;
			}

			// Init the node and write the edges after we moved to the next
			// node

			if (last_tail != e.tail && last_tail != (NodeType) LL_NIL_NODE) {
				load_node_out(graph, et, new_level, last_tail, adj_list_buffer,
						weight_buffer, prop_weight, in_sort);
				adj_list_buffer.clear();
				weight_buffer.clear();
			}

			last_head = e.head;
			last_tail = e.tail;

			// Load the edge into the buffer

			adj_list_buffer.push_back(e.head);
			if (HasWeight && load_weight) weight_buffer.push_back(e.weight[0]);

			// Progress

			if (print_progress) {
				if (loaded_edges % step == 0) {
					fprintf(stderr, ".");
					if (loaded_edges % (step * 10) == 0) {
						fprintf(stderr, "%lu", loaded_edges / 1000000ul);
					}
				}
			}
		}

		// Finish the buffer

		if (was_sorted && last_tail != (NodeType) LL_NIL_NODE) {
			load_node_out(graph, et, new_level, last_tail, adj_list_buffer,
					weight_buffer, prop_weight, in_sort);
		}
	}

	/*
	 *
	 * CASE 2: The input file is not sorted
	 *
	 */

	// Now if the buffer was not sorted, load it using the external sort

	if (!was_sorted) {

		// Temporarily free some memory to make space for sort

		graph->out().et_free();

		// Load into external sort

		ll_external_sort<xs_edge, xs_edge_comparator>* out_sort
			= new ll_external_sort<xs_edge, xs_edge_comparator>(config);

		NodeType last_tail = (NodeType) LL_NIL_NODE;
		NodeType last_head = (NodeType) LL_NIL_NODE;
		size_t read_edges = 0;

		while (next_edge(&e.tail, &e.head, &e.weight[0])) {
			loaded_edges++;
			read_edges++;
			if (config->lc_max_edges > 0
					&& read_edges > config->lc_max_edges) {
				break;
			}

			if (config->lc_direction == LL_L_UNDIRECTED_ORDERED) {
				if (e.tail > e.head) {
					unsigned x = e.tail; e.tail = e.head; e.head = x;
				}
			}

			if (config->lc_deduplicate && last_head == e.head
					&& last_tail == e.tail) {
				continue;
			}

			last_head = e.head;
			last_tail = e.tail;

			*out_sort << e;

			// Emit the reversed twin edge for doubled undirected graphs

			if (config->lc_direction == LL_L_UNDIRECTED_DOUBLE) {
				if (e.tail != e.head) {
					unsigned x = e.tail; e.tail = e.head; e.head = x;
					*out_sort << e;
					loaded_edges++;
				}
			}

			// Progress

			if (print_progress) {
				if (loaded_edges % step == 0) {
					fprintf(stderr, ".");
					if (loaded_edges % (step * 10) == 0) {
						fprintf(stderr, "%lu", loaded_edges / 1000000ul);
					}
				}
			}
		}

		out_sort->sort();
		et = graph->out().et_reinit();

		// Now load the CSR

		xs_edge* buffer;
		size_t length;

		std::vector<NodeType> adj_list_buffer;
		std::vector<WeightType> weight_buffer;

		last_tail = (NodeType) LL_NIL_NODE;
		last_head = (NodeType) LL_NIL_NODE;

		if (print_progress) {
			fprintf(stderr, "[+]");
		}

		while (out_sort->next_block(&buffer, &length)) {
			while (length --> 0) {

				if (config->lc_deduplicate && last_head == buffer->head
						&& last_tail == buffer->tail) {
					buffer++;
					continue;
				}

				// Init the node and write the edges after we moved to the
				// next node

				if (last_tail != buffer->tail
						&& last_tail != (NodeType) LL_NIL_NODE) {
					load_node_out(graph, et, new_level, last_tail,
							adj_list_buffer, weight_buffer, prop_weight,
							in_sort);
					adj_list_buffer.clear();
					weight_buffer.clear();
				}

				last_head = buffer->head;
				last_tail = buffer->tail;

				// Load the edge into the buffer.
				// BUG FIX: the weight must come from the sorted record
				// (buffer), not from `e`, which still holds the last edge
				// read during the reading phase above.

				adj_list_buffer.push_back(buffer->head);
				if (HasWeight && load_weight)
					weight_buffer.push_back(buffer->weight[0]);

				buffer++;

				// Progress

				if (print_progress) {
					if (loaded_edges % step == 0) {
						fprintf(stderr, ".");
						if (loaded_edges % (step * 10) == 0) {
							fprintf(stderr, "%lu", loaded_edges / 1000000ul);
						}
					}
				}
			}
		}

		// Finish the buffer

		if (last_tail != (NodeType) LL_NIL_NODE) {
			load_node_out(graph, et, new_level, last_tail, adj_list_buffer,
					weight_buffer, prop_weight, in_sort);
		}

		// Clean-up

		delete out_sort;
	}

	/*
	 *
	 * Finish up the out-edges and do the in-edges
	 *
	 */

	graph->out().finish_level_vertices();
	graph->out().finish_level_edges();

	if (HasWeight && load_weight) {
		prop_weight->finish_level();
	}

	if (reverse) {
		if (print_progress) {
			fprintf(stderr, "[I]");
		}

		// If the out-to-in, in-to-out properties are not enabled, disable
		// that feature in the corresponding ll_csr_base

		if (!config->lc_reverse_edges || !config->lc_reverse_maps) {
			graph->in().set_edge_translation(false);
		}

		// Sort the in-edges and load them

		in_sort->sort();

		xs_in_edge* buffer;
		size_t length;
		loaded_edges = 0;

		et = graph->in().edge_table(new_level);

		NodeType last_head = (NodeType) LL_NIL_NODE;
		std::vector<NodeType> adj_list_buffer;
		std::vector<WeightType> weight_buffer;

		while (in_sort->next_block(&buffer, &length)) {
			while (length --> 0) {

				// Init the node and write the edges after we moved to the
				// next node

				if (last_head != buffer->head
						&& last_head != (NodeType) LL_NIL_NODE) {
					load_node_in(graph, et, new_level, last_head,
							adj_list_buffer);
					adj_list_buffer.clear();
				}

				last_head = buffer->head;

				// Load the edge into the buffer

				adj_list_buffer.push_back(buffer->tail);
				buffer++;

				// Progress.
				// BUG FIX: loaded_edges was never incremented here, so
				// 0 % step == 0 held on every iteration and a dot was
				// printed per edge.

				loaded_edges++;
				if (print_progress) {
					if (loaded_edges % step == 0) {
						fprintf(stderr, ".");
						if (loaded_edges % (step * 10) == 0) {
							fprintf(stderr, "%lu", loaded_edges / 1000000ul);
						}
					}
				}
			}
		}

		// Finish the buffer

		if (last_head != (NodeType) LL_NIL_NODE) {
			load_node_in(graph, et, new_level, last_head, adj_list_buffer);
		}

		// Finish the in-edges

		delete in_sort;
		in_sort = NULL;

		graph->in().finish_level_vertices();
		graph->in().finish_level_edges();
	}

	// Finish

	_last_has_more = _has_more;
	_has_more = false;

	return true;
}
};
/**
 * The direct loader for node_pair_t buffers
 */
class ll_node_pair_loader : public ll_edge_list_loader<node_t, false>
{
	std::vector<node_pair_t>* _buffer;	// the edge buffer
	size_t _index;						// read cursor into _buffer
	bool _own;							// do we own (and delete) _buffer?


public:

	/**
	 * Create an instance of class ll_node_pair_loader
	 *
	 * @param buffer the buffer
	 * @param own true to transfer ownership of the buffer to this object
	 */
	ll_node_pair_loader(std::vector<node_pair_t>* buffer, bool own = false)
		: ll_edge_list_loader<node_t, false>(),
		  _buffer(buffer), _index(0), _own(own) {}


	/**
	 * Destroy the loader
	 */
	virtual ~ll_node_pair_loader() {
		if (_own) delete _buffer;
	}


protected:

	/**
	 * Read the next edge
	 *
	 * @param o_tail the output for tail
	 * @param o_head the output for head
	 * @param o_weight the output for weight (ignored; HasWeight is false)
	 * @return true if the edge was loaded, false if EOF or error
	 */
	virtual bool next_edge(node_t* o_tail, node_t* o_head,
			float* o_weight) {

		if (_index >= _buffer->size()) return false;

		const node_pair_t& p = (*_buffer)[_index++];
		*o_tail = p.tail;
		*o_head = p.head;
		return true;
	}


	/**
	 * Rewind the input file
	 */
	virtual void rewind() {
		_index = 0;
	}


public:

	/**
	 * Get the size
	 *
	 * @return size
	 */
	inline size_t size() const {
		return _buffer->size();
	}
};
/**
 * Loader for a queue of node_pair_t buffers
 */
class ll_node_pair_queue_loader : public ll_edge_list_loader<node_t, false> {

	std::deque<std::vector<node_pair_t>*>* _buffer_queue;	// not owned
	std::deque<std::vector<node_pair_t>*>::iterator _buffer_queue_iterator;
	size_t _inner_index;	// cursor within the current buffer


public:

	/**
	 * Create an instance of class ll_node_pair_queue_loader
	 *
	 * @param buffer_queue the buffer queue
	 */
	ll_node_pair_queue_loader(std::deque<std::vector<node_pair_t>*>* buffer_queue)
		: ll_edge_list_loader<node_t, false>() {
		_buffer_queue = buffer_queue;
		rewind();
	}


	/**
	 * Destroy the loader
	 */
	virtual ~ll_node_pair_queue_loader() {
	}


protected:

	/**
	 * Read the next edge
	 *
	 * @param o_tail the output for tail
	 * @param o_head the output for head
	 * @param o_weight the output for weight (ignored; HasWeight is false)
	 * @return true if the edge was loaded, false if EOF or error
	 */
	virtual bool next_edge(node_t* o_tail, node_t* o_head,
			float* o_weight) {

		// Advance past exhausted (or empty) buffers.  BUG FIX: the buffer
		// pointer must be re-read after each iterator advance -- the
		// previous version kept using the stale pointer after moving the
		// iterator, re-emitting an element of the old buffer instead of
		// reading the first element of the new one, and it also mishandled
		// empty buffers in the queue.
		while (_buffer_queue_iterator != _buffer_queue->end()
				&& _inner_index >= (*_buffer_queue_iterator)->size()) {
			++_buffer_queue_iterator;
			_inner_index = 0;
		}
		if (_buffer_queue_iterator == _buffer_queue->end()) return false;

		std::vector<node_pair_t>* b = *_buffer_queue_iterator;
		*o_tail = (*b)[_inner_index].tail;
		*o_head = (*b)[_inner_index].head;

		_inner_index++;
		return true;
	}


	/**
	 * Rewind the input file
	 */
	virtual void rewind() {
		_buffer_queue_iterator = _buffer_queue->begin();
		_inner_index = 0;
	}
};
#endif
|
apply-kernels.h | #ifndef __ARRAY_APPLY_KERNELS_H__
#define __ARRAY_APPLY_KERNELS_H__
#include <cstdlib>
#include <type_traits>
#include <omp.h>
#include "../../../macros/macros.h"
#include "../../../meta/meta.h"
#include "../../../types/types.h"
namespace __core__ {
namespace __functional__ {
namespace __apply__ {
namespace __array__ {
namespace __private__ {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunknown-pragmas"
// In-place map over one array: arr[i] = fn_T::fn(arr[i], args...).
// threadnum is a compile-time thread count; when > 1 the loop runs under
// OpenMP with that many threads, otherwise it runs serially.
template <typename fn_T,int threadnum,typename T,typename IT,typename... Args> __optimize__
void __apply_function_ckernel__(T* arr,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr[i]=fn_T::fn(arr[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr[i]=fn_T::fn(arr[i],args...);
	}
}
// Out-of-place map: arr_dst[i] = fn_T::fn(arr_src[i], args...).
// Parallel (OpenMP, threadnum threads) when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename U,typename IT,typename... Args> __optimize__
void __apply_function_ckernel__(T* arr_dst,U* arr_src,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(arr_src[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(arr_src[i],args...);
	}
}
// Binary zip map: arr_dst[i] = fn_T::fn(arr1[i], arr2[i], args...).
// Parallel (OpenMP, threadnum threads) when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename V,typename U,typename IT,typename... Args> __optimize__
void __apply_function_ckernel__(T* arr_dst,V* arr1,U* arr2,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(arr1[i],arr2[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(arr1[i],arr2[i],args...);
	}
}
// Ternary zip map: arr_dst[i] = fn_T::fn(arr1[i], arr2[i], arr3[i], args...).
// Parallel (OpenMP, threadnum threads) when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename V,typename U,typename W,typename IT,typename... Args> __optimize__
void __apply_function_ckernel__(T* arr_dst,V* arr1,U* arr2,W* arr3,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(arr1[i],arr2[i],arr3[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(arr1[i],arr2[i],arr3[i],args...);
	}
}
// Indexed in-place map: arr[i] = fn_T::fn(i, arr[i], args...) -- the functor
// also receives the element index. Parallel when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename IT,typename... Args> __optimize__
void __apply_function_indexed_ckernel__(T* arr,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr[i]=fn_T::fn(i,arr[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr[i]=fn_T::fn(i,arr[i],args...);
	}
}
// Indexed out-of-place map: arr_dst[i] = fn_T::fn(i, arr_src[i], args...).
// Parallel when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename U,typename IT,typename... Args> __optimize__
void __apply_function_indexed_ckernel__(T* arr_dst,U* arr_src,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(i,arr_src[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(i,arr_src[i],args...);
	}
}
// Indexed binary zip map: arr_dst[i] = fn_T::fn(i, arr1[i], arr2[i], args...).
// Parallel when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename V,typename U,typename IT,typename... Args> __optimize__
void __apply_function_indexed_ckernel__(T* arr_dst,V* arr1,U* arr2,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],args...);
	}
}
// Indexed ternary zip map:
// arr_dst[i] = fn_T::fn(i, arr1[i], arr2[i], arr3[i], args...).
// Parallel when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename T,typename V,typename U,typename W,typename IT,typename... Args> __optimize__
void __apply_function_indexed_ckernel__(T* arr_dst,V* arr1,U* arr2,W* arr3,IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],arr3[i],args...);
	}
	else {
		for(IT i=0;i<size;++i)
			arr_dst[i]=fn_T::fn(i,arr1[i],arr2[i],arr3[i],args...);
	}
}
// Index-only kernel: calls fn_T::fn(i, size, args...) for every i in
// [0, size) -- no array is touched by the kernel itself; any output is the
// functor's responsibility. Parallel when threadnum > 1, else serial.
template <typename fn_T,int threadnum,typename IT,typename... Args> __optimize__
void __apply_function_meta_ckernel__(IT size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel for num_threads(threadnum)
		for(IT i=0;i<size;++i)
			fn_T::fn(i,size,args...);
	}
	else {
		for(IT i=0;i<size;++i)
			fn_T::fn(i,size,args...);
	}
}
// Index-only kernel with per-thread scratch memory: each executing thread
// allocates a private buffer of private_mem_size bytes, passes it to every
// fn_T::fn(i, size, private_mem, args...) call it makes, and frees it after
// its share of the loop.
//
// FIX: malloc() returns void*, which does not implicitly convert to uchar*
// in C++ -- an explicit cast is required for this to compile.
// NOTE(review): the allocation result is still unchecked; fn receives NULL
// on OOM -- confirm whether callers guarantee small private_mem_size.
template <typename fn_T,int threadnum,typename IT,typename... Args> __optimize__
void __apply_function_meta_ckernel__(IT size,IT private_mem_size,Args... args) {
	if(threadnum>1) {
#pragma omp parallel num_threads(threadnum)
		{
			uchar *private_mem=static_cast<uchar*>(std::malloc(private_mem_size));
#pragma omp for
			for(IT i=0;i<size;++i)
				fn_T::fn(i,size,private_mem,args...);
			std::free(private_mem);
		}
	}
	else {
		uchar *private_mem=static_cast<uchar*>(std::malloc(private_mem_size));
		for(IT i=0;i<size;++i)
			fn_T::fn(i,size,private_mem,args...);
		std::free(private_mem);
	}
}
#pragma GCC diagnostic pop
}
}
}
}
}
#endif
|
2DConvolution.c | /**
* 2DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define NI SIZE
#define NJ SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/*
 * Sequential reference implementation of a 3x3 convolution over the
 * NI x NJ matrix A, writing the interior of B (the one-cell border of B is
 * left untouched). The term order of the stencil sum is kept identical to
 * the OpenMP variant so the results match bit-for-bit.
 */
void conv2D(DATA_TYPE *A, DATA_TYPE *B) {
  /* 3x3 stencil coefficients */
  const DATA_TYPE c11 = +0.2, c21 = +0.5, c31 = -0.8;
  const DATA_TYPE c12 = -0.3, c22 = +0.6, c32 = -0.9;
  const DATA_TYPE c13 = +0.4, c23 = +0.7, c33 = +0.10;

  for (int i = 1; i < NI - 1; ++i) {
    /* Row pointers for rows i-1, i, i+1, hoisted out of the inner loop */
    const DATA_TYPE *above = &A[(i - 1) * NJ];
    const DATA_TYPE *here = &A[i * NJ];
    const DATA_TYPE *below = &A[(i + 1) * NJ];

    for (int j = 1; j < NJ - 1; ++j) {
      /* Same summation order as the original expression */
      B[i * NJ + j] =
          c11 * above[j - 1] + c12 * here[j - 1] + c13 * below[j - 1] +
          c21 * above[j] + c22 * here[j] + c23 * below[j] +
          c31 * above[j + 1] + c32 * here[j + 1] + c33 * below[j + 1];
    }
  }
}
/* Offloaded 3x3 convolution: same stencil as conv2D, distributed over the
 * target device via OpenMP 4.x.  A is copied to the device, B copied back.
 * Coefficient values and the multiply-add order are identical to conv2D so
 * that compareResults sees matching floating-point behavior. */
void conv2D_OMP(DATA_TYPE *A, DATA_TYPE *B) {
  const DATA_TYPE k11 = +0.2, k21 = +0.5, k31 = -0.8;
  const DATA_TYPE k12 = -0.3, k22 = +0.6, k32 = -0.9;
  const DATA_TYPE k13 = +0.4, k23 = +0.7, k33 = +0.10;

#pragma omp target teams distribute parallel for map(to : A[ : NI *NJ]) map(from : B[ : NI *NJ]) device(OMP_DEVICE_ID)
  for (int row = 1; row < NI - 1; ++row) {
    LLVM_MCA_BEGIN("loop_j");
    for (int col = 1; col < NJ - 1; ++col) {
      B[row * NJ + col] =
          k11 * A[(row - 1) * NJ + (col - 1)] + k12 * A[(row + 0) * NJ + (col - 1)] +
          k13 * A[(row + 1) * NJ + (col - 1)] + k21 * A[(row - 1) * NJ + (col + 0)] +
          k22 * A[(row + 0) * NJ + (col + 0)] + k23 * A[(row + 1) * NJ + (col + 0)] +
          k31 * A[(row - 1) * NJ + (col + 1)] + k32 * A[(row + 0) * NJ + (col + 1)] +
          k33 * A[(row + 1) * NJ + (col + 1)];
    }
    LLVM_MCA_END("loop_j");
  }
}
/* Fills the NI x NJ matrix A with pseudo-random values in [0, 1]. */
void init(DATA_TYPE *A) {
  for (int row = 0; row < NI; ++row) {
    for (int col = 0; col < NJ; ++col) {
      A[row * NJ + col] = (float)rand() / RAND_MAX;
    }
  }
}
/* Counts interior cells where the sequential result B and the offloaded
 * result B_OMP differ by more than ERROR_THRESHOLD percent.
 * Returns the number of mismatching cells (0 means the results agree). */
int compareResults(DATA_TYPE *B, DATA_TYPE *B_OMP) {
  int mismatches = 0;
  /* border cells are never written by the kernels, so skip them */
  for (int row = 1; row < NI - 1; ++row) {
    for (int col = 1; col < NJ - 1; ++col) {
      if (percentDiff(B[row * NJ + col], B_OMP[row * NJ + col]) > ERROR_THRESHOLD) {
        ++mismatches;
      }
    }
  }
  return mismatches;
}
/* Benchmark driver: allocates the input, runs the OMP and/or sequential
 * kernels depending on compile-time flags, optionally compares results, and
 * returns the mismatch count (0 on success).
 * Fix: malloc results were previously used unchecked. */
int main(int argc, char *argv[]) {
  fprintf(stdout, ">> Two dimensional (2D) convolution <<\n");
  // declare arrays and allocate memory
  DATA_TYPE *A = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  DATA_TYPE *B = NULL;
  DATA_TYPE *B_OMP = NULL;
  if (A == NULL) {
    fprintf(stderr, "Failed to allocate input matrix\n");
    return EXIT_FAILURE;
  }
  // initialize the arrays
  init(A);
  // run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
  B_OMP = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  if (B_OMP == NULL) {
    fprintf(stderr, "Failed to allocate OMP output matrix\n");
    free(A);
    return EXIT_FAILURE;
  }
  BENCHMARK_OMP(conv2D_OMP(A, B_OMP));
  // prevent dead code elimination
  DCE_PREVENT(B_OMP, NI*NJ);
#endif
  // run sequential version if enabled
#ifdef RUN_CPU_SEQ
  B = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  if (B == NULL) {
    fprintf(stderr, "Failed to allocate sequential output matrix\n");
    free(A);
    free(B_OMP);
    return EXIT_FAILURE;
  }
  BENCHMARK_CPU(conv2D(A, B));
  // prevent dead code elimination
  DCE_PREVENT(B, NI*NJ);
#endif
  int fail = 0;
  // if test mode enabled, compare the results
#ifdef RUN_TEST
  fail += compareResults(B, B_OMP);
  printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif
  free(A);
  free(B);       /* free(NULL) is a no-op */
  free(B_OMP);
  return fail;
}
|
pastix.c | /* CalculiX - A 3-dimensional finite element program */
/*Copyright (C) 1998-2021 Guido Dhondt*/
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as*/
/* published by the Free Software Foundation(version 2);*/
/**/
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of*/
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the*/
/* GNU General Public License for more details.*/
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#ifdef PASTIX
#include <spm.h>
#include <pastix.h>
//#include <api.h>
//#include <nompi.h>
//#include <datatypes.h>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "CalculiX.h"
#include "pastix.h"
// Time structs for benchmarking
extern struct timespec totalCalculixTimeStart, totalCalculixTimeEnd;
double totalPastixTime;
// Variables for storing reuse history
int totalIterations = 0;
int totalReused = 0;
// Current sparse matrix in STI format
double* auPtr = NULL;
double* adPtr = NULL;
ITG *icolTotal = NULL, *irowTotal = NULL;
// Matrix data from previous iteration
ITG neqPrev=0, nzsPrev=0;
ITG *icolPrev=NULL,*irowPrev=NULL,*jqPrev=NULL,*offsetPrev=NULL;
ITG inputformatPrev=-1;
ITG basePrev=1;
// When set, diagonal scaling of the system is skipped (see pastix_main_generic)
char noScale=0;
// Current sparse matrix in CSC
double *aupastix=NULL;
ITG *icolpastix=NULL,*irowpastix=NULL;
// Global variable that indicates whether we are currently reusing or not
char redo = 1;
// Global variable which data set was previously used (basic/radiation) and now
#define BASIC 1
#define AS 2
char modePrev = BASIC;
char mode = BASIC;
// NOTE(review): these two macros deliberately carry a trailing semicolon;
// several statements below (e.g. "char usage_call = MULTI_SOLVE") rely on the
// macro supplying it.  Removing the semicolons here would break those lines.
#define SINGLE_SOLVE 1;
#define MULTI_SOLVE 2;
char usage = SINGLE_SOLVE;
// PaStiX configuration (one parameter set per mode, swapped by pastix_set_globals)
spm_int_t iparm_basic[IPARM_SIZE];
spm_int_t iparm_as[IPARM_SIZE];
double dparm_basic[DPARM_SIZE];
double dparm_as[DPARM_SIZE];
spm_int_t *iparm = iparm_basic;
double *dparm = dparm_basic;
pastix_data_t* pastix_data = NULL;
spmatrix_t *spm = NULL;
// GPU active or not
char gpu = 0;
// Store how many nzs the merged Matrix has
ITG nzsTotal = 0;
// Size of allocated space for sparse matrix
ITG pastix_nnzBound = 0;
// Number of iterations that failed with mixed precision
ITG mixedFailed = 0;
// indicates whether this is the first invocation of PaStiX or not
char firstIter = 1;
// When this flag is activated, PaStiX will not reuse in the next iteration
char forceRedo = 0;
// Use double or mixed precision
char mixed = 1;
char globDoublePrecision = 0;
// This is set to one, when to many iterations with mixed precision did not converge
char stickToDouble = 0;
// Pointers for faster matrix transpose
ITG *irowacc = NULL;
ITG *irowPrediction = NULL;
// Number of threads
ITG nthread_mkl=0;
// Snapshot of the per-mode solver state (BASIC vs AS).  pastix_set_globals
// copies the globals above into one of these objects and restores the other.
// NOTE(review): this tag intentionally(?) shadows the library's opaque
// "struct pastix_data_s" from pastix.h — pastix_factor_main_generic accesses
// pastix_data->iparm through this local layout.  Confirm this matches the
// PaStiX4CalculiX fork's definition before changing any field order here.
struct pastix_data_s {
int totalIterations;
int totalReused;
ITG *icolTotal;
ITG *irowTotal;
ITG neqPrev;
ITG nzsPrev;
ITG *icolPrev;
ITG *irowPrev;
ITG *jqPrev;
ITG inputformatPrev;
ITG basePrev;
ITG *offsetPrev;
double *aupastix;
ITG *icolpastix;
ITG *irowpastix;
char redo;
ITG nzsTotal;
ITG pastix_nnzBound;
ITG mixedFailed;
char firstIter;
char forceRedo;
char globDoublePrecision;
char stickToDouble;
ITG *irowacc;
ITG *irowPrediction;
spm_int_t *iparm;
double *dparm;
char gpu;
char mixed;
pastix_data_t* pastix_data;
spmatrix_t *spm;
};
typedef struct pastix_data_s pastix_data_object;
// Initial state for the two modes; positional initializers must stay in the
// exact field order of struct pastix_data_s above.
pastix_data_object pastix_mode_basic = {
0,0,NULL,NULL,0,0,NULL,NULL,NULL,-1,0,NULL,
NULL,NULL,NULL,1,0,0,0,1,0,0,0,NULL,NULL,iparm_basic,dparm_basic,
0,1,NULL,NULL
};
pastix_data_object pastix_mode_as = {
0,0,NULL,NULL,0,0,NULL,NULL,NULL,-1,0,NULL,
NULL,NULL,NULL,1,0,0,0,1,0,0,0,NULL,NULL,iparm_as,dparm_as,
0,1,NULL,NULL
};
// Initializes and configurates PaStiX environment. Also forwards the sparse matrix pointers
// Initializes and configures the PaStiX environment and attaches the CSC
// matrix built by pastix_csc_conversion to a fresh spm.  When the previous
// structure is being reused (!redo), only the value pointer is swapped in.
void pastix_init(double *ad, double *au, double *adb, double *aub,
double *sigma,ITG *icol, ITG *irow,
ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
ITG *jq, ITG *nzs3){
	// if reusing, only update the value pointer of the sparse matrix
	if(!redo){
		pastixResetSteps(pastix_data);
		// drop a previously attached value array unless it is the one we reuse
		if(spm->values != aupastix && spm->values != NULL ) free(spm->values);
		spm->values = aupastix;
		printf("\n");
		spmPrintInfo( spm, stdout );
		printf("\n");
		return;
	}
	ITG nthread, nthread_v;
	char *env;
	/* set MKL_NUM_THREADS to min(CCX_NPROC_EQUATION_SOLVER, OMP_NUM_THREADS);
	   must be done once */
	if (nthread_mkl == 0) {
		nthread=1;
		env=getenv("MKL_NUM_THREADS");
		if(env) {
			nthread=atoi(env);}
		else {
			env=getenv("OMP_NUM_THREADS");
			if(env) {nthread=atoi(env);}
		}
		env=getenv("CCX_NPROC_EQUATION_SOLVER");
		if(env) {
			nthread_v=atoi(env);
			if (nthread_v <= nthread) {nthread=nthread_v;}
		}
		if (nthread < 1) {nthread=1;}
		nthread_mkl=nthread;
	}
	// Init integer and double parameters with default values
	pastixInitParam( iparm, dparm );
	// Set best PaStiX parameters for CalculiX usage
	iparm[IPARM_ORDERING] = PastixOrderScotch;
	if( mode == AS ){
		iparm[IPARM_SCHEDULER] = PastixSchedStatic;
	}
	else{
		iparm[IPARM_SCHEDULER] = PastixSchedParsec;
	}
	iparm[IPARM_THREAD_NBR] = nthread_mkl;
	iparm[IPARM_GPU_NBR] = (int) gpu;
	// 3 = double, 2 = single precision (per the PaStiX4CalculiX convention used here)
	iparm[IPARM_FLOAT] = globDoublePrecision ? 3 : 2;
	iparm[IPARM_MIN_BLOCKSIZE] = 1024;
	iparm[IPARM_MAX_BLOCKSIZE] = 2048;
	iparm[IPARM_FACTORIZATION] = PastixFactLU;
	iparm[IPARM_TASKS2D_WIDTH] = globDoublePrecision ? 256 : 128;
	iparm[IPARM_REFINEMENT] = PastixRefineGMRES;
	iparm[IPARM_REUSE_LU] = firstIter ? 0 : 1;
	// NOTE(review): the next line unconditionally overwrites the assignment
	// above, making the firstIter value dead.  Possibly intended as
	// forceRedo ? 2 : (firstIter ? 0 : 1) — confirm against PaStiX4CalculiX
	// before changing; kept byte-identical here.
	iparm[IPARM_REUSE_LU] = forceRedo ? 2 : 1;
	iparm[IPARM_GPU_MEMORY_PERCENTAGE] = 95;
	iparm[IPARM_GPU_MEMORY_BLOCK_SIZE] = 64 * 1024;
	// NOTE(review): MULTI_SOLVE expands to "2;" (the macro carries a trailing
	// semicolon), which is why this statement has no semicolon of its own.
	char usage_call = MULTI_SOLVE
	if( usage == usage_call ){
		dparm[DPARM_EPSILON_REFINEMENT] = 1e-7;
		iparm[IPARM_ITERMAX] = 50;
		iparm[IPARM_GMRES_IM] = 50;
	} else{
		dparm[DPARM_EPSILON_REFINEMENT] = 1e-12;
		dparm[DPARM_EPSILON_MAGN_CTRL] = 0.;
		iparm[IPARM_ITERMAX] = 70;
		iparm[IPARM_GMRES_IM] = 70;
	}
	// Initialize sparse matrix
	spm = malloc( sizeof( spmatrix_t ) );
	spmInit(spm);
	spm->flttype = globDoublePrecision ? SpmDouble : SpmFloat;
	if(spm->values != aupastix && spm->values != NULL ) free(spm->values);
	spm->values = aupastix;
	spm->fmttype = SpmCSC;
	spm->nexp = spm->gNexp = spm->gN = spm->n = *neq;
	spm->mtxtype = SpmGeneral;
	// inputformat 3 stores one triangle plus diagonal; otherwise both triangles
	if( *inputformat == 3 ){
		spm->nnzexp = spm->gnnzexp = spm->gnnz = spm->nnz = nzsTotal + *neq;
	} else{
		spm->nnzexp = spm->gnnzexp = spm->gnnz = spm->nnz = nzsTotal * 2 + *neq;
	}
	spm->colptr = (spm_int_t*) icolpastix;
	spm->rowptr = (spm_int_t*) irowpastix;
	// initialize pastix
	pastixInit( &pastix_data, MPI_COMM_WORLD, iparm, dparm );
	printf("\n");
	spmPrintInfo( spm, stdout );
	printf("\n");
	// perform reordering, analysis and symbolic factorization if it's more than 1 equation
	if(spm->n > 1){
		pastix_task_analyze( pastix_data, spm );
	}
}
// Converts CalculiX's STI-style sparse storage (ad diagonal, au triangles,
// icol/irow/jq column structure) into the CSC arrays PaStiX consumes
// (aupastix/icolpastix/irowpastix), optionally merging the current sparsity
// pattern with the previous iteration's pattern so the symbolic factorization
// can be reused.  Applies the (sigma * [adb|aub]) shift when *sigma != 0.
// Fix: the memset of offsetPrev previously passed an element count where a
// byte count is required, leaving most of the array uninitialized.
void pastix_csc_conversion(double *ad, double *au, double *adb, double *aub,
		double *sigma,ITG *icol, ITG *irow,
		ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
		ITG *jq, ITG *nzs3){
	ITG i,j;
	char merged=0;
	// jq for the merged matrix
	ITG* jqTotal = NULL;
	if(*neq != neqPrev || *inputformat != inputformatPrev)
		forceRedo = 1;
	redo = forceRedo ? 1 : 0;
	if(!redo){
		nzsTotal = 0;
		NNEW(icolTotal, ITG, *neq);
		ITG base = jq[0];
		// Compute the number of entries in the merged matrix
		#pragma omp parallel for reduction(+:nzsTotal)
		for(i=0;i<*neq;i++){
			ITG kCur = jq[i] - base;
			ITG kPrev = jqPrev[i] - basePrev;
			ITG curColTotal = 0;
			// walk both sorted row lists of column i and count the union
			while(kCur < jq[i+1] - base && kPrev < jqPrev[i+1] - basePrev) {
				if(irowPrev[kPrev] == irow[kCur]){
					kCur++;
					kPrev++;
				}
				else{
					if(irowPrev[kPrev] < irow[kCur])
						kPrev++;
					else // irowPrev[kPrev] > irow[k]
						kCur++;
				}
				curColTotal++;
			}
			while(kCur < jq[i+1] - base){
				kCur++;
				curColTotal++;
			}
			while(kPrev < jqPrev[i+1] - basePrev){
				kPrev++;
				curColTotal++;
			}
			icolTotal[i] = curColTotal;
			nzsTotal += curColTotal;
		}
		// compute jq for the merged matrix
		NNEW(jqTotal, ITG, (*neq+1));
		jqTotal[0] = base;
		for(i = 0; i < *neq; i++){
			jqTotal[i+1] = jqTotal[i] + icolTotal[i];
		}
		// If the number of entries in the merged matrix is the same as in the last iteration, we can reuse
		if(nzsTotal == nzsPrev){
			printf("Reusing csc.!\n");
		}
		else{
			redo = 1;
			printf("Not reusing csc, merging patterns!\n");
		}
		// allocate space for the sparse matrix
		if(*symmetryflag && *inputformat != 3)
			NNEW(auPtr,double,2 * nzsTotal);
		else
			NNEW(auPtr,double,nzsTotal);
		// NOTE(review): allocated with neqPrev but cleared with *neq below;
		// benign only because this branch implies *neq == neqPrev.
		NNEW(adPtr,double,neqPrev);
		NNEW(irowTotal, ITG, nzsTotal);
		if(*symmetryflag && *inputformat != 3){
			j=2*nzsTotal;
		}
		else{
			j=nzsTotal;
		}
		memset(auPtr, 0, j * sizeof(double));
		memset(adPtr, 0, *neq * sizeof(double));
		// merge the old and the new sparsity pattern
		// NOTE(review): this loop uses "base" for jqPrev where the counting
		// loop above used "basePrev"; correct only while base == basePrev.
		#pragma omp parallel for shared(auPtr)
		for(i=0;i<*neq;i++){
			ITG kCur = jq[i] - base;
			ITG kPrev = jqPrev[i] - base;
			ITG kTotal = jqTotal[i] - base;
			adPtr[i] = ad[i] - (*sigma == 0 ? 0.0 : (*sigma)*adb[i]);
			while(kCur < jq[i+1] - base && kPrev < jqPrev[i+1] - base) {
				if(irowPrev[kPrev] == irow[kCur]){
					auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]);
					if(*symmetryflag && *inputformat != 3 )
						auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]);
					irowTotal[kTotal] = irow[kCur];
					kCur++;
					kPrev++;
				}
				else{
					if(irowPrev[kPrev] < irow[kCur]){
						// entry only existed in the previous pattern: value stays 0
						irowTotal[kTotal] = irowPrev[kPrev];
						kPrev++;
					}
					else // irowPrev[kPrev] > irow[k]
					{
						auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]);
						if(*symmetryflag && *inputformat != 3)
							auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]);
						irowTotal[kTotal] = irow[kCur];
						kCur++;
					}
				}
				kTotal++;
			}
			while(kCur < jq[i+1] - base){
				auPtr[kTotal] = au[kCur] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur]);
				if(*symmetryflag && *inputformat != 3)
					auPtr[kTotal + nzsTotal] = au[kCur + *nzs3] - (*sigma == 0 ? 0.0 : (*sigma)*aub[kCur + *nzs3]);
				irowTotal[kTotal] = irow[kCur];
				kCur++;
				kTotal++;
			}
			while(kPrev < jqPrev[i+1] - base){
				// stale entry from the previous pattern: value stays 0
				irowTotal[kTotal] = irowPrev[kPrev];
				kPrev++;
				kTotal++;
			}
		}
		SFREE(irowPrev);
		SFREE(icolPrev);
		SFREE(jqPrev);
		irowPrev = NULL;
		icolPrev = NULL;
		jqPrev = NULL;
		// update pointers to the merged matrix
		icol = icolTotal;
		icolPrev = icolTotal;
		irow = irowTotal;
		irowPrev = irowTotal;
		jqPrev = jqTotal;
		nzsPrev = nzsTotal;
		basePrev = base;
		au = auPtr;
		ad = adPtr;
		merged = 1;
	}
	else
	{
		// This is executed in either the first iteration, or when the number of equations changed
		printf("Not reusing csc.\n");
		if(icolPrev != NULL){
			SFREE(icolPrev);
			icolPrev = NULL;
		}
		if(irowPrev != NULL){
			SFREE(irowPrev);
			irowPrev = NULL;
		}
		if(jqPrev != NULL){
			SFREE(jqPrev);
			jqPrev = NULL;
		}
		NNEW(icolPrev,ITG,*neq);
		NNEW(irowPrev,ITG,*nzs);
		NNEW(jqPrev,ITG,*neq+1);
		memcpy(icolPrev, icol, sizeof(ITG) * *neq);
		memcpy(irowPrev, irow, sizeof(ITG) * *nzs);
		memcpy(jqPrev, jq, sizeof(ITG) * (*neq+1));
		nzsTotal = *nzs;
		nzsPrev = *nzs;
		neqPrev = *neq;
		jqTotal = jqPrev;
		inputformatPrev = *inputformat;
	}
	// Convert Matrix Format
	if(*inputformat==1 || *symmetryflag==0){
		/* lower triangular matrix is stored column by column in
		   au, followed by the upper triangular matrix row by row;
		   the diagonal terms are stored in ad */
		// allocate space for the matrix and the utility arrays
		if(redo){
			// We allocate 10% more space for the values than required so that we have to perform the expensive cudaMallocHost only once, even when the size of the matrix increases slightly
			if((nzsTotal * 2 + *neq) > pastix_nnzBound){
				// perform the call with PaStiX because pinned memory allocation via CUDA is performed if gpu is activated
				if( !firstIter && aupastix == spm->values ) spm->values = NULL;
				pastixAllocMemory((void**)&aupastix, sizeof(double) * 1.1 * (nzsTotal * 2 + *neq), gpu);
				pastix_nnzBound = 1.1 * (nzsTotal * 2 + *neq);
			}
			if(irowpastix != NULL ){
				SFREE(irowpastix);
				if( irowpastix == spm->rowptr ) spm->rowptr = NULL;
			}
			NNEW(irowpastix,ITG,nzsTotal*2+*neq);
			if(icolpastix != NULL ){
				SFREE(icolpastix);
				if( icolpastix == spm->colptr ) spm->colptr = NULL;
			}
			NNEW(icolpastix,ITG,*neq+1);
			if(irowacc != NULL){
				SFREE(irowacc);
				irowacc = NULL;
			}
			NNEW(irowacc,ITG,*neq);
			if(irowPrediction != NULL){
				SFREE(irowPrediction);
				irowPrediction = NULL;
			}
			NNEW(irowPrediction,ITG,nzsTotal);
		}
		// Compute utility pointers for parallelization
		// irowPrediction stores the offset to the first entry in it's column of each entry
		// irowacc stores the number of elements in each row
		if(redo){
			for(i=0;i<nzsTotal;i++){
				irowPrediction[i] = irowacc[irow[i]-1]++;
			}
			icolpastix[0] = 1;
			for(i=0;i<*neq;i++){
				icolpastix[i+1] = icolpastix[i] + icol[i] + irowacc[i] + 1;
			}
		}
		// copy lower triangular values to the right position in the CSC
		#pragma omp parallel for private(j) shared(aupastix)
		for(i=0;i<*neq;i++){
			ITG k_pastix = icolpastix[i] + irowacc[i];
			ITG k = jqTotal[i] - 1;
			aupastix[k_pastix-1] = ad[i] - (merged != 0 ? 0.0 : (*sigma == 0.0 ? 0.0 : (*sigma)*adb[i]));
			memcpy(aupastix + k_pastix, au + k, sizeof(double) * icol[i]);
			if(*sigma != 0.0 && !merged ){
				for(j=0;j<icol[i];j++){
					aupastix[k_pastix+j] -= (*sigma)*aub[k+j];
				}
			}
		}
		// copy the upper triangular values to the right position in the CSC
		#pragma omp parallel for private(j) shared(aupastix)
		for(i=0;i<*neq;i++){
			ITG k = jqTotal[i] - 1;
			for(j=0;j<icol[i];j++){
				aupastix[irowPrediction[k] + icolpastix[irow[k]-1] - 1] = au[k+(*symmetryflag == 0 ? 0 : (*nzs == *nzs3 ? nzsTotal : *nzs3))] - (merged != 0 ? 0 : (*sigma == 0.0 ? 0.0 : (*sigma *aub[k+(*symmetryflag == 0 ? 0 : (*nzs == *nzs3 ? nzsTotal : *nzs3))])));
				k++;
			}
		}
		// do the same for the rowptr (does not change when reusing)
		if(redo){
			#pragma omp parallel for
			for(i=0;i<*neq;i++){
				ITG k_pastix = icolpastix[i] + irowacc[i];
				ITG k = jqTotal[i] - 1;
				irowpastix[k_pastix-1] = i+1;
				memcpy(irowpastix + k_pastix, irow + k, sizeof(ITG) * icol[i]);
			}
			#pragma omp parallel for private(j) shared(irowpastix)
			for(i=0;i<*neq;i++){
				ITG k = jqTotal[i] - 1;
				for(j=0;j<icol[i];j++){
					irowpastix[irowPrediction[k] + icolpastix[irow[k]-1] - 1] = i+1;
					k++;
				}
			}
		}
	}
	else if(*inputformat==3){
		ITG countnew = 0;
		ITG *row_newEntries = NULL;
		ITG *col_newEntries = NULL;
		// search for missing entries for a structural symmetric matrix
		if(redo){
			row_newEntries = malloc( sizeof(ITG) * nzsTotal );
			col_newEntries = malloc( sizeof(ITG) * nzsTotal );
			memset( row_newEntries, 0, sizeof(ITG) * nzsTotal );
			memset( col_newEntries, 0, sizeof(ITG) * nzsTotal );
			char found = 0;
			ITG z = 0;
			ITG temp = 0;
			// loop through the columns
			#pragma omp parallel for private(j,z) firstprivate(temp,found)
			for(i=0;i<*neq;i++){
				// loop through the entries in this column
				for(j=jqTotal[i]-1;j<jqTotal[i+1]-1;j++){
					temp = irow[j];
					// loop through the symmetric column counter part to check for symmetry
					for(z=jqTotal[temp-1]-1;z<jqTotal[temp]-1;z++){
						if( irow[z]-1 == i ){
							found=1;
							break;
						}
					}
					// if no entry was found add a dummy to the array and increase the counter for missing entries
					#pragma omp critical
					if( found == 0 ){
						row_newEntries[countnew] = i + 1;
						col_newEntries[countnew] = temp;
						countnew++;
					}
					found = 0;
				}
			}
			printf("added %d entries to the matrix\n",countnew);
			nzsTotal += countnew;
			// allocate memory for the PaStiX arrays and free the old ones if necessary
			if((nzsTotal + *neq) > pastix_nnzBound){
				if( !firstIter && aupastix == spm->values ) spm->values = NULL;
				pastixAllocMemory((void**)&aupastix, sizeof(double) * 1.1 * (nzsTotal + *neq), gpu);
				pastix_nnzBound = 1.1 * (nzsTotal + *neq);
			}
			memset( aupastix, 0, sizeof(double) * pastix_nnzBound );
			if(irowpastix != NULL ){
				SFREE(irowpastix);
				if(irowpastix == spm->rowptr) spm->rowptr = NULL;
			}
			NNEW(irowpastix,ITG,nzsTotal+*neq);
			if(icolpastix != NULL ){
				SFREE(icolpastix);
				if(icolpastix == spm->colptr) spm->colptr = NULL;
			}
			NNEW(icolpastix,ITG,*neq+1);
			memcpy(icolpastix, jqTotal, sizeof(ITG) * (*neq+1));
			if(offsetPrev != NULL ){
				SFREE(offsetPrev);
			}
			NNEW(offsetPrev,ITG,*neq+1);
			// fix: memset takes a byte count; the element count used before
			// left all but the first *neq+1 bytes of offsetPrev uninitialized
			memset(offsetPrev,0,sizeof(ITG)*(*neq+1));
		}
		else{
			nzsTotal += offsetPrev[*neq];
			memset( aupastix, 0, sizeof(double) * pastix_nnzBound );
		}
		//#pragma omp parallel for private(j) firstprivate(offsetPrev)
		for(i=0;i<*neq;i++){
			ITG entriesPerColumn = jqTotal[i+1] - jqTotal[i];
			ITG offsetSource = jqTotal[i] - jqTotal[0];
			if(redo){
				// copy irow column per column and add the additional diagonal entry
				memcpy(irowpastix + i + offsetSource + offsetPrev[i], irow + offsetSource,
					sizeof(ITG) * entriesPerColumn);
				irowpastix[i+jqTotal[i+1]-jqTotal[0]+offsetPrev[i]] = i+1;
			}
			// copy au column per column
			memcpy(aupastix + i + offsetSource + offsetPrev[i], au + offsetSource,
				sizeof(double) * entriesPerColumn);
			// subtract the buckling values
			if(*sigma != 0 && merged == 0){
				for(j=0;j<entriesPerColumn;j++){
					aupastix[i + offsetSource + j + offsetPrev[i]] -= (*sigma)*aub[offsetSource + j];
				}
			}
			// add the diagonal entries to aupastix
			aupastix[i + (jqTotal[i+1] - jqTotal[0]) + offsetPrev[i]] = ad[i] - (merged != 0 ? 0.0 : (*sigma == 0 ? 0.0 : (*sigma)*adb[i]));
			// add the found entries for making the matrix structural symmetric and increase the resulting offset in arrays
			if(redo){
				offsetPrev[i+1] = offsetPrev[i];
				for( j=0;j<countnew;j++ ){
					if( col_newEntries[j]-1 == i ){
						irowpastix[i + (jqTotal[i+1] - jqTotal[0]) + 1 + offsetPrev[i+1]] = row_newEntries[j];
						offsetPrev[i+1]++;
					}
				}
				// add the diagonal and additional entries to the column pointer
				icolpastix[i+1] += i+1+offsetPrev[i+1];
			}
		}
		if(redo) icolpastix[*neq] = nzsTotal + *neq + 1;
		// free arrays for added symmetrized entries
		if(row_newEntries){
			SFREE(row_newEntries);
			row_newEntries = NULL;
		}
		if(col_newEntries){
			SFREE(col_newEntries);
			col_newEntries = NULL;
		}
	}
	// Free the merged array in STI format
	if(auPtr){
		SFREE(auPtr);
		auPtr = NULL;
	}
	if(adPtr){
		SFREE(adPtr);
		adPtr = NULL;
	}
}
// PaStiX invocation when the factorization function is called individually
// Full single-shot factorization path: converts the matrix, tears down the
// previous PaStiX instance, re-initializes, forces the GPU off and factorizes.
// Always runs in double precision and never reuses a previous factorization.
void pastix_factor_main_generic(double *ad, double *au, double *adb, double *aub,
double *sigma,ITG *icol, ITG *irow,
ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
ITG *jq, ITG *nzs3){
	pastix_set_globals(mode);
	// Set GPU flag from environment (AS mode never uses the GPU)
	const char* pastix_gpu = getenv("PASTIX_GPU");
	if(pastix_gpu)
		gpu = ( mode == AS ) ? 0 : (*pastix_gpu == '1') ? 1 : 0;
	// Perform individual invocations always in double precision. If previous iterations were in single precision, do not reuse.
	forceRedo=1;
	globDoublePrecision = 1;
	// invoke PaStiX
	pastix_csc_conversion(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3);
	pastix_cleanup(neq,symmetryflag);
	pastix_init(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3);
	gpu = 0;
	iparm[IPARM_GPU_NBR]=0;
	// NOTE(review): pastix_data is a pastix_data_t* from the library, but this
	// member access goes through the struct pastix_data_s re-declared in this
	// file; it only works if that local layout matches the PaStiX4CalculiX
	// fork's internal one — confirm before touching.
	pastix_data->iparm[IPARM_GPU_NBR]=0;
	pastix_factor(ad, au, adb, aub, sigma, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3);
}
// invokes the factorization routine of PaStiX
// Runs the PaStiX numerical factorization on the globally registered spm.
// The arguments are unused here: the matrix was already converted and attached
// to the global spm/pastix_data by pastix_csc_conversion / pastix_init.
void pastix_factor(double *ad, double *au, double *adb, double *aub,
		double *sigma,ITG *icol, ITG *irow,
		ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
		ITG *jq, ITG *nzs3){
	// a 1x1 system is solved directly in pastix_solve_generic — skip factorization
	if(spm->n != 1){
		pastix_task_numfact( pastix_data, spm );
	}
}
// invokes the solve and iterative refinement routines of PaStiX
// Solves A*x = b for the factorized system and refines the solution.
// On entry x holds the RHS; on exit it holds the solution.  In mixed-precision
// mode the solve runs in float and the refinement in double.  Returns the
// PaStiX status: 0 on success, -1 (NaN, scaling already off) or -2 (NaN,
// retry without scaling / in double) on failure.
// Fix: the RHS copy `b` was leaked on the NaN early-return paths.
ITG pastix_solve_generic(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){
	ITG i;
	double* b;
	float* buffer;
	ITG rc=0;
	// dont call pastix with only one equation, might lead to segfault
	if(spm->n == 1)
	{
		x[0] = x[0] / aupastix[0];
		return 0;
	}
	// check whether the RHS consists of only Zeroes and return in that case
	char allZero = 1;
	for(i = 0; i < *neq; i++){
		if(x[i] != 0){
			allZero = 0;
			break;
		}
	}
	if(allZero){
		printf("RHS only consists of 0.0\n");
		return 0;
	}
	//Copy the b so that we can modify x without losing b
	NNEW(b,double,*nrhs**neq);
	memcpy(b, x, sizeof(double) * (*nrhs) * (*neq));
	// If we are in mixed precision mode, cast double x to float x and call solve. Afterwards upcast the solution.
	if(!globDoublePrecision){
		NNEW(buffer,float,*nrhs**neq);
		#pragma omp parallel for
		for(i = 0; i < (*nrhs) * (*neq); i++){
			buffer[i] = (float) x[i];
		}
		rc = pastix_task_solve( pastix_data, *nrhs, buffer, spm->n );
		#pragma omp parallel for
		for(i = 0; i < (*nrhs) * (*neq); i++){
			x[i] = (double) buffer[i];
		}
		SFREE(buffer);
		buffer = NULL;
	}
	else{
		rc = pastix_task_solve( pastix_data, *nrhs, x, spm->n );
	}
	// check for NaN in the solution (x != x is true only for NaN)
	if( x[0] != x[0] ){
		printf("\nSolution contains NaN!\n\n");
		// fix: release the RHS copy before returning (was leaked here)
		SFREE(b);
		b = NULL;
		if( noScale ){
			return -1;
		}
		else{
			return -2;
		}
	}
	// NOTE: SINGLE_SOLVE expands to "1;" — the macro carries the trailing
	// semicolon, so this statement intentionally has none of its own.
	char usage_call = SINGLE_SOLVE
	if( usage == usage_call || rc != 0 ){
		// invoke iterative refinement twice: once with the current settings,
		// then with tightened tolerance in double precision on the CPU
		rc = pastix_task_refine( pastix_data, spm->n, *nrhs, (void*)b, spm->n, (void*)x, spm->n );
		iparm[IPARM_GPU_NBR] = 0;
		dparm[DPARM_EPSILON_MAGN_CTRL] = 1e-14;
		iparm[IPARM_ITERMAX] = 50;
		rc = pastix_task_refine( pastix_data, spm->n, *nrhs, (void*)b, spm->n, (void*)x, spm->n );
		iparm[IPARM_GPU_NBR] = (int) gpu;
		iparm[IPARM_ITERMAX] = 70;
	} else{
		rc = pastix_task_refine( pastix_data, spm->n, *nrhs, (void*)b, spm->n, (void*)x, spm->n );
	}
	SFREE(b);
	b = NULL;
	modePrev = mode;
	if( !rc ) firstIter = 0;
	// escalate: NaN-free but failed refinement in double with scaling on
	// still gets one retry without scaling
	if( rc == -1 && globDoublePrecision && !noScale ){
		rc = -2;
	}
	return rc;
}
// Basic-mode solve entry point: selects the BASIC data set (swapping in its
// globals when the previous call used another mode) and forwards to the
// generic solve/refine driver.
ITG pastix_solve(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){
	mode = BASIC;
	usage = MULTI_SOLVE;
	if( modePrev != mode ) pastix_set_globals(mode);
	return pastix_solve_generic(x,neq,symmetryflag,nrhs);
}
// AS-mode (radiation) solve entry point: selects the AS data set (swapping in
// its globals when the previous call used another mode) and forwards to the
// generic solve/refine driver.
ITG pastix_solve_as(double *x, ITG *neq,ITG *symmetryflag,ITG *nrhs){
	mode = AS;
	usage = MULTI_SOLVE;
	if( modePrev != mode ) pastix_set_globals(mode);
	return pastix_solve_generic(x,neq,symmetryflag,nrhs);
}
// Invokes pastixFinalize and spmExit which frees everything but the dense LU array and parsec pointer
// Invokes spmExit and pastixFinalize, freeing everything except the dense LU
// data and the parsec pointer.  Only tears down when the structure is not
// being reused (redo) and something was actually built (!firstIter).
// Fix: spm was dereferenced before its NULL check; the check now guards the use.
void pastix_cleanup(ITG *neq,ITG *symmetryflag){
	if( !redo || firstIter ){
		return;
	}
	if( spm != NULL ){
		// detach buffers owned by this file so spmExit does not free them
		if(spm->values == aupastix) spm->values = NULL;
		if(spm->values == spm->valuesGPU) spm->valuesGPU = NULL;
		if(spm->colptr == icolpastix) spm->colptr = NULL;
		if(spm->rowptr == irowpastix) spm->rowptr = NULL;
		spmExit( spm );
		free( spm );
		spm = NULL;
	}
	pastixFinalize( &pastix_data );
}
// AS-mode cleanup: the teardown is identical for both modes, so simply
// delegate to pastix_cleanup.
void pastix_cleanup_as(ITG *neq,ITG *symmetryflag){
	pastix_cleanup(neq,symmetryflag);
}
// main method for executing PaStiX
// Main PaStiX driver: converts the matrix, (re)initializes the solver,
// optionally scales the system to a unit diagonal, factorizes, solves with
// mixed/double precision fallback, and prints per-step timings.
// On exit b holds the solution.  Recurses once when the mixed-precision solve
// produced NaNs (retry in double) or scaling broke convergence (retry unscaled).
// Fix: b_backup was leaked on the rc == -1 early return.
void pastix_main_generic(double *ad, double *au, double *adb, double *aub,
		double *sigma,double *b, ITG *icol, ITG *irow,
		ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat,
		ITG *jq, ITG *nzs3,ITG *nrhs){
	if(*neq==0){
		return;
	}
	else if(*neq==1){
		noScale=1;
	}
	pastix_set_globals( mode );
	const char* pastix_gpu = getenv("PASTIX_GPU");
	if(pastix_gpu)
		gpu = (*pastix_gpu == '1') ? 1 : 0;
	usage = SINGLE_SOLVE;
	// check mixed precision environment variable
	const char* pastix_mixed = getenv("PASTIX_MIXED_PRECISION");
	if( pastix_mixed != NULL ){
		mixed = (*pastix_mixed == '1') ? 1 : 0;
	}
	else{
		mixed = 1;
	}
	if( stickToDouble == 0 && mixed == 1 ){
		// switching from double back to mixed invalidates the reusable data
		if( globDoublePrecision == 1 ){
			forceRedo = 1;
		}
		globDoublePrecision = 0;
	}
	else{
		globDoublePrecision = 1;
	}
	// use double precision for inputformat 3 like mortar (better performance and convergence)
	if( pastix_mixed == NULL && *inputformat == 3 ){
		globDoublePrecision = 1;
		forceRedo = 0;
		stickToDouble = 1;
	}
	// backup b in case mixed precision solve corrupts the original array
	double* b_backup = NULL;
	NNEW(b_backup, double, *nrhs * *neq);
	memcpy(b_backup, b, sizeof(double) * (*nrhs)*(*neq));
	// benchmarking structs
	struct timespec start, end;
	struct timespec stepCscConversionStart, stepCscConversionEnd;
	struct timespec stepInitStart, stepInitEnd;
	struct timespec stepFactorizeStart, stepFactorizeEnd;
	struct timespec stepSolveStart, stepSolveEnd;
	struct timespec stepCleanUpStart, stepCleanUpEnd;
	double pastixTime, stepCscConversion, stepInit, stepFactorize, stepSolve, stepCleanUp, totalCCXTime, CCXwithoutPastix;
	clock_gettime(CLOCK_MONOTONIC, &start);
	clock_gettime(CLOCK_MONOTONIC, &stepCscConversionStart);
	// invoke csc conversion
	pastix_csc_conversion(ad,au,adb,aub,sigma,icol,irow,
		neq,nzs,symmetryflag,inputformat,jq,nzs3);
	clock_gettime(CLOCK_MONOTONIC, &stepCscConversionEnd);
	clock_gettime(CLOCK_MONOTONIC, &stepCleanUpStart);
	// invoke cleanup
	pastix_cleanup(neq,symmetryflag);
	clock_gettime(CLOCK_MONOTONIC, &stepCleanUpEnd);
	clock_gettime(CLOCK_MONOTONIC, &stepInitStart);
	// scale the matrix with diagonals to 1
	if( *inputformat !=3 && !noScale ){
		ITG i=0;
		#pragma omp parallel for
		for(i=0;i<*neq;i++){
			b[i] /= ad[i];
		}
		double normb=0;
		#pragma omp parallel for reduction(+:normb)
		for(i=0;i<*neq;i++){
			normb += pow(b[i],2);
		}
		normb = sqrt(normb);
		if( normb < 1e-9 ){
			printf("||b|| getting too small with scaling, boost it statically\n");
			double scal = 1e-6/normb;
			#pragma omp parallel for
			for(i=0;i<*neq;i++){
				b[i] *= scal;
			}
			#pragma omp parallel for
			for(i=0;i<icolpastix[*neq]-1;i++){
				aupastix[i] *= scal/ad[irowpastix[i]-1];
			}
		}
		else{
			#pragma omp parallel for
			for(i=0;i<icolpastix[*neq]-1;i++){
				aupastix[i] /= ad[irowpastix[i]-1];
			}
		}
	}
	//invoke init
	pastix_init(ad,au,adb,aub,sigma,icol,irow,
		neq,nzs,symmetryflag,inputformat,jq,nzs3);
	clock_gettime(CLOCK_MONOTONIC, &stepInitEnd);
	clock_gettime(CLOCK_MONOTONIC, &stepFactorizeStart);
	// invoke factor
	pastix_factor(ad,au,adb,aub,sigma,icol,irow,
		neq,nzs,symmetryflag,inputformat,jq,nzs3);
	clock_gettime(CLOCK_MONOTONIC, &stepFactorizeEnd);
	clock_gettime(CLOCK_MONOTONIC, &stepSolveStart);
	// if solve does not converge
	ITG rc = pastix_solve_generic(b,neq,symmetryflag,nrhs);
	if( rc == -1){
		// Give up, if we tried it with double precision, use backup b otherwise
		if(globDoublePrecision == 1){
			printf("PaStiX could not converge to a valid result\n");
			exit(5);
		}
		else{
			memcpy(b, b_backup, sizeof(double) * (*nrhs)*(*neq));
			printf("falling back to double precision\n");
			globDoublePrecision = 1;
			forceRedo = 1;
			stickToDouble = 1;
			mixedFailed++;
			// call pastix_main recursively, but now in double precision
			pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs);
		}
		// make sure that we switch to double and do not reuse in the next iteration
		dparm[DPARM_EPSILON_REFINEMENT] = 1e-12;
		dparm[DPARM_EPSILON_MAGN_CTRL] = .0;
		iparm[IPARM_ITERMAX] = 70;
		iparm[IPARM_GMRES_IM] = 70;
		// if we do not converge with mixed precision for the third time, permanently switch to double precision
		if(mixedFailed <= 2){
			stickToDouble = 0;
			forceRedo = 1;
		}
		// fix: b_backup was leaked on this early return
		SFREE(b_backup);
		b_backup = NULL;
		return;
	}
	else if( rc == -2){
		memcpy(b, b_backup, sizeof(double) * (*nrhs)*(*neq));
		printf("turning diagonal scaling off\n");
		forceRedo = 1;
		noScale = 1;
		// call pastix_main recursively, but now without diagonal scaling
		pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow, neq, nzs, symmetryflag, inputformat, jq, nzs3, nrhs);
	}
	else{
		forceRedo = 0;
	}
	clock_gettime(CLOCK_MONOTONIC, &stepSolveEnd);
	clock_gettime(CLOCK_MONOTONIC, &end);
	// compute benchmark times
	pastixTime = (end.tv_sec - start.tv_sec) * 1e9;
	pastixTime = (pastixTime + (end.tv_nsec - start.tv_nsec)) * 1e-9;
	totalPastixTime += pastixTime;
	clock_gettime(CLOCK_MONOTONIC, &totalCalculixTimeEnd);
	totalCCXTime = (totalCalculixTimeEnd.tv_sec - totalCalculixTimeStart.tv_sec) * 1e9;
	totalCCXTime = (totalCCXTime + (totalCalculixTimeEnd.tv_nsec - totalCalculixTimeStart.tv_nsec)) * 1e-9;
	CCXwithoutPastix = totalCCXTime - totalPastixTime;
	stepCscConversion = (stepCscConversionEnd.tv_sec - stepCscConversionStart.tv_sec) * 1e9;
	stepCscConversion = (stepCscConversion + (stepCscConversionEnd.tv_nsec - stepCscConversionStart.tv_nsec)) * 1e-9;
	stepInit = (stepInitEnd.tv_sec - stepInitStart.tv_sec) * 1e9;
	stepInit = (stepInit + (stepInitEnd.tv_nsec - stepInitStart.tv_nsec)) * 1e-9;
	stepFactorize = (stepFactorizeEnd.tv_sec - stepFactorizeStart.tv_sec) * 1e9;
	stepFactorize = (stepFactorize + (stepFactorizeEnd.tv_nsec - stepFactorizeStart.tv_nsec)) * 1e-9;
	stepSolve = (stepSolveEnd.tv_sec - stepSolveStart.tv_sec) * 1e9;
	stepSolve = (stepSolve + (stepSolveEnd.tv_nsec - stepSolveStart.tv_nsec)) * 1e-9;
	stepCleanUp = (stepCleanUpEnd.tv_sec - stepCleanUpStart.tv_sec) * 1e9;
	stepCleanUp = (stepCleanUp + (stepCleanUpEnd.tv_nsec - stepCleanUpStart.tv_nsec)) * 1e-9;
	// update iteration counters
	totalIterations++;
	if(!redo)
		totalReused++;
	// benchmark output
	printf("________________________________________\n\n");
	printf("CSC Conversion Time: %lf\n", stepCscConversion);
	printf("Init Time: %lf\n", stepInit);
	printf("Factorize Time: %lf\n", stepFactorize);
	printf("Solve Time: %lf\n", stepSolve);
	printf("Clean up Time: %lf\n", stepCleanUp);
	printf("---------------------------------\n");
	printf("Sum: %lf\n", pastixTime);
	printf("\n");
	printf("Total PaStiX Time: %lf\n", totalPastixTime);
	printf("CCX without PaStiX Time: %lf\n", CCXwithoutPastix);
	printf("Share of PaStiX Time: %lf\n", totalPastixTime / totalCCXTime );
	printf("Total Time: %lf\n", totalCCXTime);
	printf("Reusability: %d : %d \n", totalReused, totalIterations);
	printf("________________________________________\n\n");
	SFREE(b_backup);
	b_backup = NULL;
	return;
}
// Basic-mode factorization entry point: selects BASIC/MULTI_SOLVE and
// delegates to the generic single-shot factorization driver.
void pastix_factor_main(double *ad, double *au, double *adb, double *aub,
		double *sigma,ITG *icol, ITG *irow,
		ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
		ITG *jq, ITG *nzs3){
	mode = BASIC;
	usage = MULTI_SOLVE;
	pastix_factor_main_generic(ad, au, adb, aub, sigma, icol, irow, neq,
		nzs, symmetryflag, inputformat, jq, nzs3);
}
/* Entry point for a factorization in the AS mode.
 * Configures the global mode/usage flags, then delegates to the
 * shared implementation. Parameters are forwarded unchanged. */
void pastix_factor_main_as(double *ad, double *au, double *adb, double *aub,
                           double *sigma,ITG *icol, ITG *irow,
                           ITG *neq, ITG *nzs, ITG *symmetryflag, ITG *inputformat,
                           ITG *jq, ITG *nzs3){
  mode  = AS;
  usage = MULTI_SOLVE;
  pastix_factor_main_generic(ad, au, adb, aub, sigma, icol, irow,
                             neq, nzs, symmetryflag, inputformat,
                             jq, nzs3);
}
/* Factor-and-solve entry point in the standard (BASIC) mode.
 * Selects the mode, then delegates to the shared implementation.
 * (Unlike the factor-only wrappers, `usage` is left untouched.) */
void pastix_main(double *ad, double *au, double *adb, double *aub,
                 double *sigma,double *b, ITG *icol, ITG *irow,
                 ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat,
                 ITG *jq, ITG *nzs3,ITG *nrhs){
  mode = BASIC;
  pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow,
                      neq, nzs, symmetryflag, inputformat,
                      jq, nzs3, nrhs);
}
/* Factor-and-solve entry point in the AS mode.
 * Selects the mode, then delegates to the shared implementation.
 * (Unlike the factor-only wrappers, `usage` is left untouched.) */
void pastix_main_as(double *ad, double *au, double *adb, double *aub,
                    double *sigma,double *b, ITG *icol, ITG *irow,
                    ITG *neq, ITG *nzs,ITG *symmetryflag,ITG *inputformat,
                    ITG *jq, ITG *nzs3,ITG *nrhs){
  mode = AS;
  pastix_main_generic(ad, au, adb, aub, sigma, b, icol, irow,
                      neq, nzs, symmetryflag, inputformat,
                      jq, nzs3, nrhs);
}
/*
 * Swap the file-scope PaStiX state between the BASIC and AS data sets.
 *
 * The module keeps two complete snapshots of its global solver state
 * (pastix_mode_basic / pastix_mode_as). When the requested mode differs
 * from the previous one, the current globals are saved into the snapshot
 * of the *other* mode and the globals of the requested mode are loaded.
 *
 * mode: BASIC or AS (values of the mode enum/constants; passed as char).
 *
 * Fix: the original switch had no default case, so an unexpected mode
 * value left temp/temp2 uninitialized and the function then read and
 * wrote through them (undefined behavior). Unknown modes now record
 * modePrev and return without touching any state.
 */
void pastix_set_globals(char mode){
  if( modePrev != mode ){
    pastix_data_object *temp,*temp2;
    switch(mode){
      case BASIC:
        temp  = &pastix_mode_basic;
        temp2 = &pastix_mode_as;
        break;
      case AS:
        temp  = &pastix_mode_as;
        temp2 = &pastix_mode_basic;
        break;
      default:
        /* Unknown mode: nothing to swap. Record it and bail out instead
         * of dereferencing uninitialized pointers. */
        modePrev = mode;
        return;
    }
    // saving old data set
    temp2->totalIterations = totalIterations;
    temp2->totalReused = totalReused;
    // Current sparse matrix in STI format
    temp2->icolTotal = icolTotal;
    temp2->irowTotal = irowTotal;
    // Matrix data from previous iteration
    temp2->neqPrev = neqPrev;
    temp2->nzsPrev = nzsPrev;
    temp2->icolPrev = icolPrev;
    temp2->irowPrev = irowPrev;
    temp2->jqPrev = jqPrev;
    temp2->inputformatPrev = inputformatPrev;
    temp2->basePrev = basePrev;
    temp2->offsetPrev = offsetPrev;
    // Current sparse matrix in CSC
    temp2->aupastix = aupastix;
    temp2->icolpastix = icolpastix;
    temp2->irowpastix = irowpastix;
    // Global variable that indicates whether we are currently reusing or not
    temp2->redo = redo;
    // PaStiX configuration
    temp2->iparm = iparm;
    temp2->dparm = dparm;
    temp2->pastix_data = pastix_data;
    temp2->spm = spm;
    // GPU active or not
    temp2->gpu = gpu;
    // Store how many nzs the merged Matrix has
    temp2->nzsTotal = nzsTotal;
    // Size of allocated space for sparse matrix
    temp2->pastix_nnzBound = pastix_nnzBound;
    // Number of iterations that failed with mixed precision
    temp2->mixedFailed = mixedFailed;
    // indicates whether this is the first invocation of PaStiX or not
    temp2->firstIter = firstIter;
    // When this flag is activated, PaStiX will not reuse in the next iteration
    temp2->forceRedo = forceRedo;
    // Use double or mixed precision
    temp2->mixed = mixed;
    temp2->globDoublePrecision = globDoublePrecision;
    // This is set to one, when to many iterations with mixed precision did not converge
    temp2->stickToDouble = stickToDouble;
    // Pointers for faster matrix transpose
    temp2->irowacc = irowacc;
    temp2->irowPrediction = irowPrediction;
    // setting new data set
    totalIterations = temp->totalIterations;
    totalReused = temp->totalReused;
    // Current sparse matrix in STI format
    icolTotal = temp->icolTotal;
    irowTotal = temp->irowTotal;
    // Matrix data from previous iteration
    neqPrev = temp->neqPrev;
    nzsPrev = temp->nzsPrev;
    icolPrev = temp->icolPrev;
    irowPrev = temp->irowPrev;
    jqPrev = temp->jqPrev;
    inputformatPrev = temp->inputformatPrev;
    basePrev = temp->basePrev;
    offsetPrev = temp->offsetPrev;
    // Current sparse matrix in CSC
    aupastix = temp->aupastix;
    icolpastix = temp->icolpastix;
    irowpastix = temp->irowpastix;
    // Global variable that indicates whether we are currently reusing or not
    redo = temp->redo;
    // PaStiX configuration
    iparm = temp->iparm;
    dparm = temp->dparm;
    pastix_data = temp->pastix_data;
    spm = temp->spm;
    // GPU active or not
    gpu = temp->gpu;
    // Store how many nzs the merged Matrix has
    nzsTotal = temp->nzsTotal;
    // Size of allocated space for sparse matrix
    pastix_nnzBound = temp->pastix_nnzBound;
    // Number of iterations that failed with mixed precision
    mixedFailed = temp->mixedFailed;
    // indicates whether this is the first invocation of PaStiX or not
    firstIter = temp->firstIter;
    // When this flag is activated, PaStiX will not reuse in the next iteration
    forceRedo = temp->forceRedo;
    // Use double or mixed precision
    mixed = temp->mixed;
    globDoublePrecision = temp->globDoublePrecision;
    // This is set to one, when to many iterations with mixed precision did not converge
    stickToDouble = temp->stickToDouble;
    // Pointers for faster matrix transpose
    irowacc = temp->irowacc;
    irowPrediction = temp->irowPrediction;
    // A freshly loaded data set that never ran must not attempt reuse.
    if( firstIter ){
      forceRedo=1;
    }
  }
  modePrev = mode;
  return;
}
#endif
|
BaseFunc.h |
// A polarization value of exactly -1.0 is the sentinel for "unpolarized".
bool
IsPolarized(double &Polarization)
{
  return Polarization != -1.;
}
// Fill CosTerm/SinTerm (length ThetaLength) with the polarization weights.
// Polarized: cos/sin of (Polarization + theta) per sample.
// Unpolarized: both components get the constant weight 1/sqrt(2).
// NOTE(review): reads the file-scope `Polarization` variable rather than a
// parameter — confirm it is set before this is called.
void
PolarizationTerm(uint ThetaLength,
                 double * ThetaPtr,
                 double * CosTerm,
                 double * SinTerm,
                 bool & Polarized)
{
  if (!Polarized)
  {
    const double weight = 1./sqrt(2);
    for (uint t = 0; t < ThetaLength; t++)
    {
      CosTerm[t] = weight ;
      SinTerm[t] = weight ;
    }
    return;
  }
  for (uint t = 0; t < ThetaLength; t++)
  {
    const double angle = Polarization + ThetaPtr[t];
    CosTerm[t] = cos(angle) ;
    SinTerm[t] = sin(angle) ;
  }
}
// Element-wise product: output[p] = scalar * array0[p] * array1[p]
// for p in [0, PhiLength). ThetaLength is unused here; it is kept for
// signature parity with Structured().
//
// Fixes: the original incremented a *shared* `output` pointer inside the
// parallel loop — a data race that also scrambles which element each
// iteration writes — and listed the loop variable `p` in the shared()
// clause although it was not declared before the pragma (invalid with
// default(none)). Indexing by `p` makes every iteration independent.
void
Unstructured(uint ThetaLength,
             uint PhiLength,
             complex128 *array0,
             double *array1,
             complex128 scalar,
             complex128 *output)
{
  #pragma omp parallel for default(none) shared(output, scalar, array0, array1, PhiLength)
  for (uint p=0; p < PhiLength; p++ )
  {
    output[p] = scalar * array0[p] * array1[p];
  }
}
// Outer-product fill: output is a row-major (PhiLength x ThetaLength)
// grid with theta varying fastest;
// output[p*ThetaLength + t] = scalar * array0[p] * array1[t].
void
Structured(uint ThetaLength,
           uint PhiLength,
           complex128 *array0,
           double *array1,
           complex128 scalar,
           complex128 *output)
{
  for (uint p=0; p < PhiLength; p++ )
  {
    complex128 *row = output + p * ThetaLength;
    for (uint t=0; t < ThetaLength; t++ )
    {
      row[t] = scalar * array0[p] * array1[t];
    }
  }
}
// -
|
GB_unop__identity_fp64_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_int32)
// op(A') function: GB (_unop_tran__identity_fp64_int32)
// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (double) Ax [p] for every entry present in A.
// When Ab is NULL the matrix is held densely and every p is present;
// otherwise Ab is A's bitmap and absent entries are skipped.
GrB_Info GB (_unop_apply__identity_fp64_int32)
(
    double *Cx,                 // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // bitmap case: skip entries that are not present
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = (double) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting int32 entries to double.
// The actual kernel lives in GB_unop_transpose.c, which is textually
// included here and specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
GB_unop__acos_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fc32_fc32)
// op(A') function: GB (_unop_tran__acos_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = cacosf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacosf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = cacosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cacosf (Ax [p]) for every entry present in A.
// When Ab is NULL the matrix is held densely and every p is present;
// otherwise Ab is A's bitmap and absent entries are skipped.
GrB_Info GB (_unop_apply__acos_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // bitmap case: skip entries that are not present
        if (Ab != NULL && !Ab [p]) continue ;
        Cx [p] = cacosf (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply complex arccos to each entry.
// The actual kernel lives in GB_unop_transpose.c, which is textually
// included here and specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__acos_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
norm2Many.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Accumulate the squared 2-norm of a multi-field vector into normA[0]:
// normA[0] = sum over fld, i of cpu_a[i + fld*offset]^2.
// Nblocks is unused in the CPU path; `offset` is the stride between fields.
extern "C" void FUNC(norm2Many)(const dlong & Nblocks, const dlong & N,
                                const dlong & Nfields,
                                const dlong & offset,
                                const dfloat * __restrict__ cpu_a,
                                dfloat * __restrict__ normA){
  dfloat sumSq = 0;
#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2) reduction(+:sumSq)
#endif
  for(int fld=0;fld<Nfields;fld++) {
    for(int i=0;i<N;++i){
      const dfloat v = cpu_a[i + fld*offset];
      sumSq += v*v;
    }
  }
  normA[0] = sumSq;
}
|
debug_test_system.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>
// ==========================================================================
// The SeqAn testing infrastructure. Based on ideas from the OpenMS
// "ClassTest.h".
// ==========================================================================
// TODO(holtgrew): This could use some cleanup.
// SEQAN_NO_GENERATED_FORWARDS
#ifndef SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#define SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
#include <iostream> // stdout, stderr
#include <iomanip>
#include <cstring> // strrpos
#include <cstdlib> // exit()
#include <cstdio>
#include <cstdarg> // va_start, va_list, va_end
#include <set>
#include <vector>
#include <string>
#ifdef PLATFORM_WINDOWS
#include <Windows.h> // DeleteFile()
#else // #ifdef PLATFORM_WINDOWS
#include <unistd.h> // unlink()
#include <sys/stat.h> // mkdir()
#include <dirent.h> // DIR
#if SEQAN_HAS_EXECINFO
#include <execinfo.h> // backtrace(), backtrace_symbols()
#endif // #if SEQAN_HAS_EXECINFO
#include <cxxabi.h> // __cxa_demangle()
#include <signal.h>
#endif // #ifdef PLATFORM_WINDOWS
/**
.Macro.SEQAN_FAIL
..cat:Assertions
..summary:Force abortion of program, regardless of debugging settings.
..signature:SEQAN_FAIL(msg[, args])
..param.msg:A format string.
..param.args:An optional list of arguments.
..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values.
..example.text:In the following example, the $SEQAN_FAIL$ is there if a possible value is added to $MyEnum$ but the function $foo$ is not updated accordingly.
..example.code:
enum MyEnum {
VALUE_ONE,
VALUE_TWO
};
bool foo(MyEnum x) {
switch (x) {
case VALUE_ONE:
// do something
return true;
case VALUE_TWO:
// do something
return true;
}
SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x);
return false;
}
..include:seqan/basic.h
..see:Macro.SEQAN_CHECK
*/
#define SEQAN_FAIL(...) \
do { \
::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
__VA_ARGS__); \
::seqan::ClassTest::fail(); \
} while (false)
/**
.Macro.SEQAN_CHECK
..cat:Assertions
..summary:Force abortion of program if a condition is not met, regardless of debugging settings.
..signature:SEQAN_CHECK(condition, msg[, args])
..param.msg:A format string.
..param.args:An optional list of arguments.
..remarks:Use this if something really unexpected happens inside your functions and there is no way to report this through the API. A good example would be logic errors, e.g. invalid values.
..example.text:In the following example, the $SEQAN_CHECK$ stops program execution if a value is added to $MyEnum$ but the function $foo$ is not updated accordingly.
..example.code:
enum MyEnum {
VALUE_ONE,
VALUE_TWO
};
bool foo(MyEnum x) {
SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x);
switch (x) {
case VALUE_ONE:
// do something
return true;
case VALUE_TWO:
// do something
return true;
}
return false; // Should never reach here, checked above with SEQAN_CHECK.
}
..include:seqan/basic.h
..see:Macro.SEQAN_FAIL
*/
#define SEQAN_CHECK(_arg1, ...) \
do { \
if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
(_arg1), # _arg1, \
__VA_ARGS__)) { \
::seqan::ClassTest::fail(); \
} \
} while (false)
// SeqAn's has three global debug/testing levels: testing, debug and
// release. Depending on the level, the SEQAN_ASSERT_* and
// SEQAN_CHECKPOINT macros will be enabled.
//
// Note that this is independent of the <cassert> assertions and
// NDEBUG being defined.
//
// The levels are enabled by the values of the macros
// SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to
// 0, one disables the level and by setting the macro to 1, one
// enables a level. Enabling testing also enables debug, overriding a
// value of 0 for SEQAN_ENABLE_DEBUG.
//
// If the level is release (both the macros for debug and testing are
// 0), the assertions will be disabled. If the level is debug then
// the assertions will be enabled. If the level is testing then the
// checkpoint macros will also be enabled.
//
// The default is to enable debugging but disable testing.
//
// You can print the current level using the function seqan::printDebugLevel().
/**
.Macro.SEQAN_ENABLE_TESTING
..cat:Testing & Debugging
..summary:Indicates whether testing is enabled.
..signature:SEQAN_ENABLE_DEBUG
..remarks:When enabled (set to 1), testing is enabled. This means the macros for the tests (@Macro.SEQAN_BEGIN_TESTSUITE@, @Macro.SEQAN_DEFINE_TEST@, @Macro.SEQAN_CALL_TEST@, and @Macro.SEQAN_END_TESTSUITE@) will be enabled. This makes failing assertions raise exceptions instead of call $abort()$ and enables checkpoints.
..remarks:By default, this is set to 0.
..remarks:If @Macro.SEQAN_ENABLE_CHECKPOINTS@ is not defined before including $<seqan/basic.h>$, then @Macro.SEQAN_ENABLE_CHECKPOINTS@ will be set to the value of @Macro.SEQAN_ENABLE_TESTING@ (after the default initialization to 0).
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..remarks:If set to 1 then @Macro.SEQAN_ENABLE_TESTING@ is force-set to 0 as well.
..see:Macro.SEQAN_ENABLE_DEBUG
..see:Macro.SEQAN_ENABLE_CHECKPOINTS
*/
// Set default for SEQAN_ENABLE_TESTING.
#ifndef SEQAN_ENABLE_TESTING
#define SEQAN_ENABLE_TESTING 0
#endif // #ifndef SEQAN_ENABLE_TESTING
/**
.Macro.SEQAN_ENABLE_DEBUG
..cat:Testing & Debugging
..summary:Indicates whether debugging is enabled.
..signature:SEQAN_ENABLE_DEBUG
..remarks:When enabled (set to 1), debugging is enabled. This means the assertion macros are expanded to actual code and not to nothing.
..remarks:By default, this is set to 0 if $NDEBUG$ is defined and to 1 if $NDEBUG$ is not defined.
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..remarks:Force-enabled if @Macro.SEQAN_ENABLE_TESTING@ is set to 1.
..see:Macro.SEQAN_ENABLE_TESTING
..see:Macro.SEQAN_ENABLE_CHECKPOINTS
*/
// Set default for SEQAN_ENABLE_DEBUG.
#ifndef SEQAN_ENABLE_DEBUG
#ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 0
#else // #ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif // #ifdef NDEBUG
#endif // #ifndef SEQAN_ENABLE_DEBUG
// Force-enable debugging if testing is enabled.
#if SEQAN_ENABLE_TESTING
#undef SEQAN_ENABLE_DEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif // #if SEQAN_ENABLE_TESTING
/**
.Macro.SEQAN_ENABLE_CHECKPOINTS
..cat:Testing & Debugging
..summary:Indicates whether checkpoints are enabled.
..signature:SEQAN_ENABLE_CHECKPOINTS
..remarks:When enabled (set to 1), checkpoints are enabled. This means the $SEQAN_CHECKPOINT$ macros are expanded to actual code and not to nothing.
..remarks:By default, this is set to $SEQAN_ENABLE_TESTING$.
..remarks:Checkpoints can come at large increases of running time in your tests. Disable them when your test run too slow.
..remarks:If you want to change this value, you have to define this value before including any SeqAn header.
..example.text:Disable checkpoints in a program.
..example.code:
// Disable SeqAn checkpoints in this program.
#define SEQAN_ENABLE_CHECKPOINTS 0
// Any SeqAn headers or headers including SeqAn headers have to come AFTER the
// definition of SEQAN_ENABLE_CHECKPOINT above.
#include <seqan/base.h>
int main(int argc, char const ** argv)
{
// Any call to SeqAn functions will NOT log any checkpoints.
return 0;
}
..see:Macro.SEQAN_ENABLE_DEBUG
..see:Macro.SEQAN_ENABLE_TESTING
*/
// Allow disabling checkpoints independent of testing.
#ifndef SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_ENABLE_CHECKPOINTS 0 // SEQAN_ENABLE_TESTING
#endif // #ifndef SEQAN_ENABLE_CHECKPOINTS
/**
.Macro.SEQAN_TYPEDEF_FOR_DEBUG
..cat:Testing & Debugging
..summary: When using typedefs that are only used in debug mode then they have to be marked with macro.
..signature:SEQAN_TYPEDEF_FOR_DEBUG
..example.code:
typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG;
*/
#if !SEQAN_ENABLE_DEBUG
# if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
# define SEQAN_TYPEDEF_FOR_DEBUG __attribute__((unused))
# else
# define SEQAN_TYPEDEF_FOR_DEBUG
# endif
#else
# define SEQAN_TYPEDEF_FOR_DEBUG
#endif
// TODO(holtgrew): This one is for profiling and in tests.
#if defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)))
# define SEQAN_UNUSED_TYPEDEF __attribute__((unused))
#else
# define SEQAN_UNUSED_TYPEDEF
#endif
namespace seqan {
// SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string
// literal with this value.
#if !defined(SEQAN_CXX_FLAGS_)
#define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET
#endif // !defined(SEQAN_CXX_FLAGS__)
#define SEQAN_MKSTRING_(str) # str
#define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str)
#define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_)
//#undef SEQAN_MKSTRING
//#undef SEQAN_MKSTRING_
/**
.Function.printDebugLevel
..cat:Testing & Debugging
..summary:Print the current SeqAn debug level and the compiler flags to the given stream.
..signature:printDebugLevel(stream)
..param.stream:The stream to print to, e.g. $std::cout$.
..include:seqan/basic.h
*/
// Write the current SeqAn debug/testing configuration and the compiler
// flags to `stream` (e.g. std::cout), one setting per line.
template <typename TStream>
void printDebugLevel(TStream & stream)
{
    stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl
           << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl
           << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl
           << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl;
}
#if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// Fallback for Windows or platforms without <execinfo.h>: stack traces
// are unavailable, so this overload intentionally does nothing.
template <typename TSize>
void printStackTrace(TSize /*maxFrames*/)
{}
#else
// print a demangled stack backtrace of the caller function
template <typename TSize>
void printStackTrace(TSize maxFrames)
{
void * addrlist[256];
char temp[4096];
char addr[20];
char offset[20];
size_t size;
int status;
char * symname;
char * demangled;
std::cerr << std::endl << "stack trace:" << std::endl;
int addrlist_len = backtrace(addrlist, maxFrames);
char ** symbollist = backtrace_symbols(addrlist, addrlist_len);
for (int i = 1; i < addrlist_len; ++i)
{
offset[0] = 0;
addr[0] = 0;
demangled = NULL;
// LINUX FORMAT:
// ./sam2svg [0x473b8c]
// /lib/libc.so.6 [0x7f40d2526f60]
// ./sam2svg(_Z2f3v+0x10) [0x47200c]
// ./sam2svg(_Z2f2v+0xd) [0x472021]
// ./sam2svg(main+0x1367) [0x4735fc]
// /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6]
//
if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr))
{
symname = temp;
if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
{
symname = demangled;
}
}
// MAC OS X FORMAT:
// 1 sam2svg 0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21
// 2 libSystem.B.dylib 0x00007fff87a6d67a _sigtramp + 26
// 3 libSystem.B.dylib 0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980
// 4 sam2svg 0x00000001000021b9 _Z2f2v + 9
// 5 sam2svg 0x00000001000034b1 main + 4546
// 6 sam2svg 0x0000000100002190 start + 52
else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset))
{
symname = temp;
if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status)))
{
symname = demangled;
}
}
// LINUX FORMAT:
// ./sam2svg [0x473b8c]
// /lib/libc.so.6 [0x7f40d2526f60]
else if (2 == sscanf(symbollist[i], "%s %s", temp, addr))
{
symname = temp;
}
// DEFAULT:
else
{
symname = symbollist[i];
}
std::cerr << std::setw(3) << i - 1;
std::cerr << std::setw(20) << addr;
std::cerr << " " << symname;
if (offset[0] != 0)
std::cerr << " + " << offset;
std::cerr << std::endl;
free(demangled);
}
std::cerr << std::endl;
// Only the array must be freed according to man page, not the contents.
free(symbollist);
}
// Signal handler: print a stack trace, then restore the default handler
// and re-raise the signal so the process still terminates with the
// original signal's semantics (core dump, exit status, etc.).
// NOTE(review): backtrace()/iostreams are not async-signal-safe; tolerated
// here since the process is about to die anyway — confirm this is intended.
static void signalHandlerPrintStackTrace(int signum)
{
    std::cerr << std::endl;
    printStackTrace(20);
    signal(signum, SIG_DFL);
    kill(getpid(), signum);
}
// Install the stack-trace-printing handler for fatal signals.
// Returns 0 so the call can be used as a static initializer.
inline int _deploySignalHandlers()
{
    signal(SIGSEGV, signalHandlerPrintStackTrace);      // segfault
    signal(SIGFPE, signalHandlerPrintStackTrace);       // divide by zero
    // ...
    return 0;
}
#if SEQAN_ENABLE_DEBUG
// automatically deploy signal handlers that output the stack trace on a trap (in debug mode)
// Static-initialization trick: the template's static member is initialized
// by calling _deploySignalHandlers(), so merely instantiating the template
// installs the handlers before main() runs.
template <typename T>
struct SignalHandlersDummy_
{
    static const int i;
};
template <typename T>
const int SignalHandlersDummy_<T>::i = _deploySignalHandlers();
namespace {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-variable"
#endif  // ifdef __clang__
// ODR-uses the static member above so its initializer actually runs.
volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i;
#ifdef __clang__
#pragma clang diagnostic pop
#endif  // ifdef __clang__
}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO
// Namespace for the testing infrastructure.
//
// This namespace contains the variables and functions that are used
// in the macros below to perform the tests.
namespace ClassTest {
// Raised when an assertion fails in test mode.
struct AssertionFailedException {};
// Container for static global data for the tests.
// Container for static global data for the tests.
// Each accessor returns a reference to a function-local static, giving
// lazily-initialized, header-safe global state (Meyers-singleton style).
// Callers both read and assign through the returned references.
struct StaticData
{
    // Number of tests that were run.
    static int & testCount()
    {
        static int result = 0;
        return result;
    }
    // Number of errors that occurred.
    static int & errorCount()
    {
        static int result = 0;
        return result;
    }
    // Number of skipped tests.
    static int & skippedCount()
    {
        static int result = 0;
        return result;
    }
    // Flag whether there was an error in this test.
    static bool & thisTestOk()
    {
        static bool result = 0;
        return result;
    }
    // Flag whether this test was skipped.
    static bool & thisTestSkipped()
    {
        static bool result = 0;
        return result;
    }
    // Name of the current test.
    // NOTE(review): the default points at a string literal with const
    // stripped; writing through it would be UB — callers must only
    // reassign the pointer, never modify the pointed-to text.
    static const char * & currentTestName()
    {
        const char * defaultValue = "";
        static const char * result = const_cast<char *>(defaultValue);
        return result;
    }
    // Base path to the binary.  Extrapolated from __FILE__.
    // NOTE(review): same const_cast caveat as above for the "." default.
    static char * & basePath()
    {
        const char * defaultValue = ".";
        static char * result = const_cast<char *>(defaultValue);
        return result;
    }
    // Base path to the directory containing "core" and "extras."
    // Extrapolated from __FILE__.
    static char * & pathToRoot()
    {
        const char * defaultValue = ".";
        static char * result = const_cast<char *>(defaultValue);
        return result;
    }
    // Total number of checkpoints in header file.
    static int & totalCheckPointCount()
    {
        static int result = 0;
        return result;
    }
    // Total number of checkpoints found in binary files.
    static int & foundCheckPointCount()
    {
        static int result = 0;
        return result;
    }
    // Names of temporary files as returned by tempFileName.  This
    // global state is used to remove any existing such files
    // after completing the testsuite.
    static::std::vector<std::string> & tempFileNames()
    {
        static::std::vector<std::string> filenames;
        return filenames;
    }
};
// Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet.
// TODO(holtgrew): Not used yet and Windows code does not work.
/*
inline
int openTempFile() {
#ifdef PLATFORM_WINDOWS
char * fileName = _tempnam(NULL, "SQN");
if (!fileName) {
::std::cerr << "Cannot create a unique temporary filename" << ::std::endl;
exit(1);
}
int result = open(fileName, _O_RDWR | OPEN_TEMPORARY);
free(fileName);
return result;
#else // A Unix...
char filenameBuffer[100];
strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX");
int result = mkstemp(filenameBuffer);
unlink(filenameBuffer);
return result;
#endif // ifdef PLATFORM_WINDOWS
}
*/
// Return the path to a temporary file, in a static buffer in this
// function. This is not thread safe!
// Return a path of the form <fresh temp dir>/test_file, creating the
// directory and registering it in StaticData::tempFileNames() for later
// cleanup. The path lives in a static buffer, so each call overwrites
// the previous result — not thread safe.
inline
const char * tempFileName()
{
//IOREV _duplicate_ overlaps with some stuff in system/file_sync.h, should be moved to io-module
    static char fileNameBuffer[1000];
#ifdef PLATFORM_WINDOWS_VS
    static char filePathBuffer[1000];
    //  Gets the temp path env string (no guarantee it's a valid path).
    DWORD dwRetVal = 0;
    dwRetVal = GetTempPath(1000,            // length of the buffer
                           filePathBuffer); // buffer for path
    if (dwRetVal > 1000 || (dwRetVal == 0))
    {
        std::cerr << "GetTempPath failed" << std::endl;
        exit(1);
    }
    UINT uRetVal   = 0;
    uRetVal = GetTempFileName(filePathBuffer,   // directory for tmp files
                              TEXT("SEQAN."),   // temp file name prefix
                              0,                // create unique name
                              fileNameBuffer);  // buffer for name
    if (uRetVal == 0)
    {
        std::cerr << "GetTempFileName failed" << std::endl;
        exit(1);
    }
    // GetTempFileName created a file; replace it with a directory of the
    // same name and return a fixed file name inside it.
    DeleteFile(fileNameBuffer);
    CreateDirectoryA(fileNameBuffer, NULL);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "\\test_file");
    return fileNameBuffer;
#else  // ifdef PLATFORM_WINDOWS_VS
    strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX");
#ifdef PLATFORM_WINDOWS_MINGW
    // There is no mkstemp in MinGW but it does not complain about tmpnam.
    tmpnam(fileNameBuffer);
#else  // ifdef PLATFORM_WINDOWS_MINGW
    // mkstemp creates a file; unlink it and create a directory with the
    // same unique name instead, then return a fixed file name inside it.
    int _tmp = mkstemp(fileNameBuffer);
    (void) _tmp;
    unlink(fileNameBuffer);
    mkdir(fileNameBuffer, 0777);
    StaticData::tempFileNames().push_back(fileNameBuffer);
    strcat(fileNameBuffer, "/test_file");
#endif  // #ifdef PLATFORM_WINDOWS_MINGW
    return fileNameBuffer;
#endif  // ifdef PLATFORM_WINDOWS_VS
}
// Initialize the testing infrastructure.
//
// Used through SEQAN_BEGIN_TESTSUITE(test_name)
//
// Prints the suite banner, resets all global counters, derives the
// directory of the test binary (basePath) from argv0, and derives the
// path to the repository root (pathToRoot) from __FILE__.
inline
void beginTestSuite(const char * testSuiteName, const char * argv0)
{
    // First things first: Print test suite name and current debug level.
    std::cout << "TEST SUITE " << testSuiteName << std::endl;
    printDebugLevel(std::cout);
    (void)testSuiteName;
    // Reset the global counters.
    StaticData::testCount() = 0;
    StaticData::skippedCount() = 0;
    StaticData::errorCount() = 0;
    StaticData::totalCheckPointCount() = 0;
    StaticData::foundCheckPointCount() = 0;
    // Get path to argv0, i.e. the position of the last path separator.
    // NOTE(review): std::min() yields the null pointer as soon as only one
    // separator kind occurs in argv0, so `end` usually stays at argv0 and
    // basePath degrades to "." -- confirm whether callers rely on this
    // before changing the scan.
    const char * end = argv0;
    const char * ptr = std::min(strchr(argv0, '\\'), strchr(argv0, '/')); // On Windows, we can have both \ and /.
    for (; ptr != 0; ptr = std::min(strchr(ptr + 1, '\\'), strchr(ptr + 1, '/')))
        end = ptr;
    int rpos = end - argv0;
    if (rpos <= 0)
    {
        StaticData::basePath() = new char[2];
        strcpy(StaticData::basePath(), ".");
    }
    else
    {
        int len = rpos;
        // BUGFIX: allocate len + 1 bytes and NUL-terminate explicitly; the
        // original allocated len bytes and left the copy unterminated.
        StaticData::basePath() = new char[len + 1];
        strncpy(StaticData::basePath(), argv0, len);
        StaticData::basePath()[len] = '\0';
    }
    // Get path to projects: find the last occurrence of "core" in
    // __FILE__, then walk back to the preceding path separator.
    const char * file = __FILE__;
    int fileLen = (int)strlen(file);
    int coreLen = (int)strlen("core");
    int pos = -1;
    // Signed arithmetic here avoids the size_t underflow the original
    // `strlen(file) - strlen("core")` bound had for very short __FILE__.
    for (int i = 0; i + coreLen < fileLen; ++i)
    {
        if (strncmp(file + i, "core", coreLen) == 0)
            pos = i;
    }
    for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos)
        continue;
    // BUGFIX: also treat pos == 0 as failure; the original only checked
    // pos == -1 and would otherwise write to index -1 of a zero-length
    // allocation below.
    if (pos <= 0)
    {
        std::cerr << "Could not extrapolate path to repository from __FILE__ == \""
                  << __FILE__ << "\"" << std::endl;
        exit(1);
    }
    // Copy file[0 .. pos-1] and replace the trailing separator by NUL.
    StaticData::pathToRoot() = new char[pos];
    strncpy(StaticData::pathToRoot(), file, pos);
    StaticData::pathToRoot()[pos - 1] = '\0';
#ifdef PLATFORM_WINDOWS_VS
    // Set CRT reporting such that everything goes to stderr and there are
    // no popups causing timeouts.
    _set_error_mode(_OUT_TO_STDERR);
    _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
    _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE);
    _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif  // PLATFORM_WINDOWS_VS
}
// Run test suite finalization.
//
// Used through SEQAN_END_TESTSUITE
//
// Prints a bottom banner with the error count, removes all registered
// temporary directories, and returns the program's return code
// (0 on success, 1 if any test recorded an error).
inline
int endTestSuite()
{
    // Release the path buffers allocated in beginTestSuite().
    delete[] StaticData::basePath();
    delete[] StaticData::pathToRoot();
    // Summary banner.
    std::cout << "**************************************" << std::endl;
    std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl;
    std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl;
    std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl;
    std::cout << "--------------------------------------" << std::endl;
    std::cout << " Total Tests: " << StaticData::testCount() << std::endl;
    std::cout << " Skipped: " << StaticData::skippedCount() << std::endl;
    std::cout << " Errors: " << StaticData::errorCount() << std::endl;
    std::cout << "**************************************" << std::endl;
    // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1;
    /*
    if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount())
        return 1;
    */
    // Delete all temporary files that still exist.
    for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i)
    {
#ifdef PLATFORM_WINDOWS
        HANDLE hFind;
        WIN32_FIND_DATA data;
        std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*");
        hFind = FindFirstFile(temp.c_str(), &data);
        if (hFind != INVALID_HANDLE_VALUE)
        {
            do
            {
                std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName;
                DeleteFile(tempp.c_str());
            }
            while (FindNextFile(hFind, &data));
            FindClose(hFind);
        }
        RemoveDirectory(StaticData::tempFileNames()[i].c_str());
#else  // #ifdef PLATFORM_WINDOWS
        DIR * dpdf;
        struct dirent * epdf;
        dpdf = opendir(StaticData::tempFileNames()[i].c_str());
        if (dpdf != NULL)
        {
            // Unlink every entry inside the temporary directory.
            while ((epdf = readdir(dpdf)) != NULL)
            {
                std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name);
                unlink(temp.c_str());
            }
            closedir(dpdf);  // BUGFIX: directory stream was previously leaked.
        }
        rmdir(StaticData::tempFileNames()[i].c_str());
#endif  // #ifdef PLATFORM_WINDOWS
    }
    if (StaticData::errorCount() != 0)
        return 1;
    return 0;
}
// Run test initialization.
//
// Registers testName as the current test, resets its ok/skipped
// status, and bumps the global test counter.
inline
void beginTest(const char * testName)
{
    StaticData::testCount() += 1;
    StaticData::currentTestName() = testName;
    StaticData::thisTestSkipped() = false;
    StaticData::thisTestOk() = true;
}
// Run test finalization.
//
// Prints exactly one status line for the current test:
// "SKIPPED"/"OK" go to stdout, "FAILED" goes to stderr.
inline
void endTest()
{
    if (StaticData::thisTestSkipped())
        std::cout << StaticData::currentTestName() << " SKIPPED" << std::endl;
    else if (StaticData::thisTestOk())
        std::cout << StaticData::currentTestName() << " OK" << std::endl;
    else
        std::cerr << StaticData::currentTestName() << " FAILED" << std::endl;
}
// Marks the current test as "skipped".
//
// Also counts it in the global skipped-test tally printed by
// endTestSuite().
inline
void skipCurrentTest()
{
    StaticData::skippedCount() += 1;
    StaticData::thisTestSkipped() = true;
}
// Called by the macro SEQAN_ASSERT_FAIL.
//
// Unconditionally records one error and prints "<file>:<line> FAILED!",
// appending the optional printf-style comment in parentheses.
inline void forceFail(const char * file, int line,
                      const char * comment, ...)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment != 0)
    {
        va_list args;
        va_start(args, comment);
        std::cerr << " (";
        vfprintf(stderr, comment, args);
        std::cerr << ")";
        va_end(args);
    }
    std::cerr << std::endl;
}
// Similar to forceFail above, but accepting a va_list parameter.
//
// Records one error and prints "<file>:<line> FAILED!", appending the
// optional printf-style comment formatted from argp.
inline void vforceFail(const char * file, int line,
                       const char * comment, va_list argp)
{
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " FAILED! ";
    if (comment != 0)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
}
// Same as forceFail above, but with comment set to 0.
inline void forceFail(const char * file, int line)
{
    const char * noComment = 0;
    forceFail(file, line, noComment);
}
// Called by the macro SEQAN_ASSERT_EQ.
//
// Checks value1 == value2; returns true iff they are equal.  On
// failure, records the error globally and prints a diagnostic with
// the optional printf-style comment appended.
template <typename T1, typename T2>
bool testEqual(char const * file, int line,
               T1 const & value1, char const * expression1,
               T2 const & value2, char const * expression2,
               char const * comment, ...)
{
    if (value1 == value2)
        return true;  // Assertion holds, nothing to report.

    // Record the failure globally.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    // Emit the diagnostic, with comment if any is given.
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " == " << expression2 << " was: " << value1
              << " != " << value2;
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestEqual(const char * file, int line,
                const T1 & value1, const char * expression1,
                const T2 & value2, const char * expression2,
                const char * comment, va_list argp)
{
    if (value1 == value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " == " << expression2 << " was: " << value1
              << " != " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testEqual(const char * file, int line,
               const T1 & value1, const char * expression1,
               const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testEqual(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_IN_DELTA.
//
// Checks that value1 lies within [value2 - value3, value2 + value3].
// Returns true iff it does; on failure, records the error globally
// and prints a diagnostic with the optional printf-style comment.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3,
                 const char * comment, ...)
{
    if (value1 >= value2 - value3 && value1 <= value2 + value3)
        return true;  // Within tolerance.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " in [" << expression2 << " - " << expression3
              << ", " << expression2 << " + " << expression3 << "] was: " << value1
              << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testInDelta above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2, typename T3>
bool vtestInDelta(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const T3 & value3, const char * expression3,
                  const char * comment, va_list argp)
{
    if (value1 >= value2 - value3 && value1 <= value2 + value3)
        return true;  // Within tolerance.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " in [" << expression2 << " - " << expression3
              << ", " << expression2 << " + " << expression3 << "] was: " << value1
              << " not in [" << value2 - value3 << ", " << value2 + value3 << "]";
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testInDelta above, but with comment set to 0.
template <typename T1, typename T2, typename T3>
bool testInDelta(const char * file, int line,
                 const T1 & value1, const char * expression1,
                 const T2 & value2, const char * expression2,
                 const T3 & value3, const char * expression3)
{
    const char * noComment = 0;
    return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, noComment);
}
// Called by the macro SEQAN_ASSERT_NEQ.
//
// Checks value1 != value2; returns true iff the two values are NOT
// equal.  On failure, records the error globally and prints a
// diagnostic with the optional printf-style comment appended.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2,
                  const char * comment, ...)
{
    if (value1 != value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " != " << expression2 << " was: " << value1
              << " == " << value2;
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testNotEqual above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestNotEqual(const char * file, int line,
                   const T1 & value1, const char * expression1,
                   const T2 & value2, const char * expression2,
                   const char * comment, va_list argp)
{
    if (value1 != value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " != " << expression2 << " was: " << value1
              << " == " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testNotEqual above, but with comment set to 0.
template <typename T1, typename T2>
bool testNotEqual(const char * file, int line,
                  const T1 & value1, const char * expression1,
                  const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testNotEqual(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_GEQ.
//
// Checks value1 >= value2; returns true iff the test holds.  On
// failure, records the error globally and prints a diagnostic with
// the optional printf-style comment appended.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (value1 >= value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " >= " << expression2 << " was: " << value1
              << " < " << value2;
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testGeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestGeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (value1 >= value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " >= " << expression2 << " was: " << value1
              << " < " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testGeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testGeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testGeq(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_GT.
//
// Checks value1 > value2; returns true iff the test holds.  On
// failure, records the error globally and prints a diagnostic with
// the optional printf-style comment appended.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (value1 > value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " > " << expression2 << " was: " << value1
              << " <= " << value2;
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testGt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestGt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (value1 > value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " > " << expression2 << " was: " << value1
              << " <= " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testGt above, but with comment set to 0.
template <typename T1, typename T2>
bool testGt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testGt(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_LEQ.
//
// Checks value1 <= value2; returns true iff the test holds.  On
// failure, records the error globally and prints a diagnostic with
// the optional printf-style comment appended.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, ...)
{
    if (value1 <= value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " <= " << expression2 << " was: " << value1
              << " > " << value2;
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testLeq above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLeq(const char * file, int line,
              const T1 & value1, const char * expression1,
              const T2 & value2, const char * expression2,
              const char * comment, va_list argp)
{
    if (value1 <= value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " <= " << expression2 << " was: " << value1
              << " > " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testLeq above, but with comment set to 0.
template <typename T1, typename T2>
bool testLeq(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testLeq(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT_LT.
//
// Checks value1 < value2 (the original header comment said "greater
// than", which was wrong); returns true iff the test holds.  On
// failure, records the error globally and prints a diagnostic with
// the optional printf-style comment appended.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2,
            const char * comment, ...)
{
    if (value1 < value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " < " << expression2 << " was: " << value1
              << " >= " << value2;
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testLt above, but accepts a va_list instead of variadic
// parameters.
template <typename T1, typename T2>
bool vtestLt(const char * file, int line,
             const T1 & value1, const char * expression1,
             const T2 & value2, const char * expression2,
             const char * comment, va_list argp)
{
    if (value1 < value2)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression1 << " < " << expression2 << " was: " << value1
              << " >= " << value2;
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testLt above, but comment is 0.
template <typename T1, typename T2>
bool testLt(const char * file, int line,
            const T1 & value1, const char * expression1,
            const T2 & value2, const char * expression2)
{
    const char * noComment = 0;
    return testLt(file, line, value1, expression1, value2, expression2, noComment);
}
// Called by the macro SEQAN_ASSERT.
//
// Checks that value_ evaluates to true; returns true iff it does.
// On failure, records the error globally and prints a diagnostic
// with the optional printf-style comment appended.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_,
              const char * comment, ...)
{
    if (value_)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be true but was " << (value_);
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testTrue above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestTrue(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, va_list argp)
{
    if (value_)
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be true but was " << (value_);
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testTrue above, but comment will automatically be set to 0.
template <typename T>
bool testTrue(const char * file, int line,
              const T & value_, const char * expression_)
{
    const char * noComment = 0;
    return testTrue(file, line, value_, expression_, noComment);
}
// Called by the macro SEQAN_ASSERT_NOT.
//
// Checks that value_ evaluates to false; returns true iff it does.
// On failure, records the error globally and prints a diagnostic
// with the optional printf-style comment appended.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_,
               const char * comment, ...)
{
    if (!(value_))
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be false but was " << (value_);
    if (comment)
    {
        std::cerr << " (";
        va_list args;
        va_start(args, comment);
        vfprintf(stderr, comment, args);
        va_end(args);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Similar to testFalse above, but accepts a va_list instead of variadic
// parameters.
template <typename T>
bool vtestFalse(const char * file, int line,
                const T & value_, const char * expression_,
                const char * comment, va_list argp)
{
    if (!(value_))
        return true;  // Assertion holds.

    // Record the failure globally and print the diagnostic.
    StaticData::thisTestOk() = false;
    StaticData::errorCount() += 1;
    std::cerr << file << ":" << line << " Assertion failed : "
              << expression_ << " should be false but was " << (value_);
    if (comment)
    {
        std::cerr << " (";
        vfprintf(stderr, comment, argp);
        std::cerr << ")";
    }
    std::cerr << std::endl;
    return false;
}
// Same as testFalse above, but comment will automatically be set to 0.
template <typename T>
bool testFalse(const char * file, int line,
               const T & value_, const char * expression_)
{
    const char * noComment = 0;
    return testFalse(file, line, value_, expression_, noComment);
}
// Represents a check point in a file.
struct CheckPoint
{
    // Path to the file.
    const char * file;
    // Line in the file.
    unsigned int line;

    // Strict weak ordering for use in std::set: primary key is the
    // file name (strcmp order), secondary key is the line number.
    bool operator<(const CheckPoint & other) const
    {
        int cmp = strcmp(file, other.file);
        if (cmp != 0)
            return cmp < 0;
        return line < other.line;
    }
};
// Wrapper for a set of check points.
// TODO(holtgrew): Simply store the set?
struct CheckPointStore
{
    // Accessor for the process-wide set of registered check points
    // (function-local static, constructed on first use).
    static ::std::set<CheckPoint> & data()
    {
        static ::std::set<CheckPoint> result;
        return result;
    }
};
// Puts the given check point into the CheckPointStore's data.
//
// Only the basename of file (the part after the last '/' or '\\') is
// stored.  Always returns true so the call can be used in expressions.
inline bool
registerCheckPoint(unsigned int line, const char * file)
{
    // Strip the directory part, accepting both separator styles.
    const char * slash = strrchr(file, '/');
    const char * backslash = strrchr(file, '\\');
    const char * baseName = (backslash > slash) ? backslash : slash;
    if (baseName == 0)
        baseName = file;
    else
        ++baseName;
    CheckPoint cp = {baseName, line};
#ifdef _OMP
#pragma omp critical
#endif  // #ifdef _OMP
    CheckPointStore::data().insert(cp);
    return true;
}
// Test whether the given check point exists in the check point
// store.
//
// Counts the query; on a miss prints "Check point lost.", on a hit
// counts it as found.
inline void
testCheckPoint(const char * file, unsigned int line)
{
    StaticData::totalCheckPointCount() += 1;
    CheckPoint cp = {file, line};
    bool known = CheckPointStore::data().find(cp) != CheckPointStore::data().end();
    if (!known)
    {
        std::cerr << file << ":" << line << " -- Check point lost."
                  << std::endl;
        return;
    }
    StaticData::foundCheckPointCount() += 1;
}
// Verify the check points for the given file.
//
// Opens <pathToRoot>/<file>, scans it line by line, and calls
// testCheckPoint() for each line containing "SEQAN_CHECKPOINT"
// (using the file's basename, matching how registerCheckPoint()
// stores names).
inline void
verifyCheckPoints(const char * file)
{
    // Basename of file, accepting both '/' and '\\'.
    char const * file_name = strrchr(file, '/');
    char const * file_name_2 = strrchr(file, '\\');
    if (file_name_2 > file_name)
        file_name = file_name_2;
    if (!file_name)
        file_name = file;
    else
        ++file_name;
    // Build "<pathToRoot>/<file>".
    int len = strlen(StaticData::pathToRoot()) +
              strlen("/") + strlen(file) + 1;
    char * absolutePath = new char[len];
    absolutePath[0] = '\0';
    strcat(absolutePath, StaticData::pathToRoot());
    strcat(absolutePath, "/");
    strcat(absolutePath, file);
    FILE * fl = ::std::fopen(absolutePath, "r");
    delete[] absolutePath;
    if (!fl)
    {
        std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl;
        // BUGFIX: return here; the original fell through and called
        // fgets(..., NULL), dereferencing a null FILE pointer.
        return;
    }
    unsigned int line_number = 1;
    char buf[1 << 16];
    while (::std::fgets(buf, sizeof(buf), fl))
    {
        if (::std::strstr(buf, "SEQAN_CHECKPOINT"))
        {
            testCheckPoint(file_name, line_number);
        }
        ++line_number;
    }
    ::std::fclose(fl);
}
#if SEQAN_ENABLE_TESTING
// If in testing mode then raise an AssertionFailedException.
// Marks the current test as failed and prints a stack trace first;
// the exception is caught by SEQAN_CALL_TEST so the suite continues
// with the next test.
inline void fail()
{
    StaticData::thisTestOk() = false;
    printStackTrace(20);
    throw AssertionFailedException();
}
#else
// If not in testing mode then quit with an abort.
// Prints a stack trace first so the failure location is visible.
inline void fail()
{
    printStackTrace(20);
    abort();
}
#endif  // #if SEQAN_ENABLE_TESTING
} // namespace ClassTest
/**
.Macro.SEQAN_DEFINE_TEST
..summary:Expand to test definition.
..cat:Testing & Debugging
..signature:SEQAN_DEFINE_TEST(test_name)
..param.test_name:The name of the test.
..remarks:This macro expands to the definition of a $void$ function with $SEQAN_TEST_ + test_name$ as its name.
..example.code:
SEQAN_DEFINE_TEST(test_name)
{
SEQAN_ASSERT_LT(0, 3);
}
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro expands to function header for one test.
// The otherwise-unused bool template parameter turns each test body
// into a template, so tests never instantiated via SEQAN_CALL_TEST
// are not fully compiled (see the parameter's own name).
#define SEQAN_DEFINE_TEST(test_name) \
    template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \
    void SEQAN_TEST_ ## test_name()
/**
.Macro.SEQAN_BEGIN_TESTSUITE
..summary:Expand to a test suite beginning.
..cat:Testing & Debugging
..signature:SEQAN_BEGIN_TESTSUITE(name)
..param.name:The name of the test suite.
..remarks:This macro expands to a $main()$ function and some initialization code that sets up the test system.
..example.code:
#include <seqan/basic.h>
SEQAN_BEGIN_TESTSUITE(test_foo)
{
SEQAN_CALL_TEST(test_foo_my_test);
}
SEQAN_END_TESTSUITE
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_END_TESTSUITE
*/
#if SEQAN_ENABLE_TESTING
// This macro expands to startup code for a test file.
// NOTE: it deliberately opens main() without closing it; the matching
// closing brace is supplied by SEQAN_END_TESTSUITE.
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
    int main(int argc, char ** argv) { \
    (void) argc; \
    ::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]);
/**
.Macro.SEQAN_END_TESTSUITE
..summary:Expand to a test suite ending.
..cat:Testing & Debugging
..signature:SEQAN_END_TESTSUITE
..remarks:This macro expands to finalization code for a test suite.
..example.code:
#include <seqan/basic.h>
SEQAN_BEGIN_TESTSUITE(test_foo)
{
SEQAN_CALL_TEST(test_foo_my_test);
}
SEQAN_END_TESTSUITE
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
*/
// This macro expands to shutdown code for a test file.
// Returns the suite's exit code and closes the main() opened by
// SEQAN_BEGIN_TESTSUITE.
#define SEQAN_END_TESTSUITE \
    return ::seqan::ClassTest::endTestSuite(); \
    }
/**
.Macro.SEQAN_CALL_TEST
..summary:Expand to calling a test.
..cat:Testing & Debugging
..signature:SEQAN_CALL_TEST(test_name)
..param.test_name:The name of the test.
..remarks:This expects the test to be defined with @Macro.SEQAN_DEFINE_TEST@. This macro will expand to code that calls the code inside a try/catch block. Use this macro within a test suite, only.
..example.code:
// Within a test suite.
SEQAN_CALL_TEST(test_name);
..see:Macro.SEQAN_SKIP_TEST
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro expands to code to call a given test.
//
// Runs the test between beginTest()/endTest() and swallows
// AssertionFailedException so a failing test does not abort the
// whole suite.  The exception is caught by const reference (the
// original caught by value, which copies and can slice).
#define SEQAN_CALL_TEST(test_name) \
    do { \
    ::seqan::ClassTest::beginTest(# test_name); \
    try { \
    SEQAN_TEST_ ## test_name<true>(); \
    } catch (::seqan::ClassTest::AssertionFailedException const & e) { \
    /* Swallow exception, go on with next test. */ \
    (void) e;  /* Get rid of unused variable warning. */ \
    } \
    ::seqan::ClassTest::endTest(); \
    } while (false)
/**
.Macro.SEQAN_SKIP_TEST
..cat:Testing & Debugging
..summary:Force the test to return without failing and mark it as skipped.
..signature:SEQAN_SKIP_TEST
..example.code:
SEQAN_DEFINE_TEST(test_skipped)
{
SEQAN_SKIP_TEST;
}
..see:Macro.SEQAN_DEFINE_TEST
..see:Macro.SEQAN_CALL_TEST
..see:Macro.SEQAN_BEGIN_TESTSUITE
..see:Macro.SEQAN_END_TESTSUITE
*/
// This macro returns from the current function and logs a "skipped"
// event for the current test.
// Wrapped in do/while(false) so it behaves as a single statement;
// note it executes `return`, so it must be used directly inside the
// test function body.
#define SEQAN_SKIP_TEST \
    do { \
    ::seqan::ClassTest::skipCurrentTest(); \
    return; \
    } while (false)
#endif // #if SEQAN_ENABLE_TESTING
// variadic macros are not supported by VS 2003 and before
#if !defined(_MSC_VER) || (_MSC_VER >= 1400)
#if SEQAN_ENABLE_DEBUG
/**
.Macro.SEQAN_ASSERT
..cat:Assertions
..summary:Test that the given expression can be coerced to $true$.
..signature:SEQAN_ASSERT(expression)
..signature:SEQAN_ASSERT_MSG(expression, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT(0); // will fail
SEQAN_ASSERT(1); // will run through
SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message.
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_NOT
..cat:Assertions
..summary:Test that the given expression can be coerced to $false$.
..signature:SEQAN_ASSERT_NOT(expression)
..signature:SEQAN_ASSERT_MSG(expression, message[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_NOT(0); // will run through
SEQAN_ASSERT_NOT(1); // will fail
SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_EQ
..cat:Assertions
..summary:Test that two given expressions are equal, as defined by the matching call to the $operator=(,)$.
..signature:SEQAN_ASSERT_EQ(expression1, expression2)
..signature:SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_EQ(0, false); // will run through
SEQAN_ASSERT_EQ(1, false); // will fail
SEQAN_ASSERT_EQ(1, "foo"); // will not compile
SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_NEQ
..cat:Assertions
..summary:Test that two given expressions are not equal, as defined by the matching call to the $operator!=(,)$.
..signature:SEQAN_ASSERT_NEQ(expression1, expression2)
..signature:SEQAN_ASSERT_NEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_NEQ(0, false); // will fail
SEQAN_ASSERT_NEQ(1, false); // will run through
SEQAN_ASSERT_NEQ(1, "foo"); // will not compile
SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_LT
..cat:Assertions
..summary:Test that the two given expressions are in the less-than relation as defined by the matching call to operator<(,).
..signature:SEQAN_ASSERT_LT(expression1, expression2)
..signature:SEQAN_ASSERT_LT_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_LT(0, 1); // will run through
SEQAN_ASSERT_LT(1, 1); // will not run through
SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_LEQ
..cat:Assertions
..summary:Test that the two given expressions are in the less-than-or-equal relation as defined by the matching call to operator<=(,).
..signature:SEQAN_ASSERT_LEQ(expression1, expression2)
..signature:SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_LEQ(1, 1); // will run through
SEQAN_ASSERT_LEQ(1, 2); // will not run through
SEQAN_ASSERT_LEQ_MSG(1, 2, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_GT
..cat:Assertions
..summary:Test that the two given expressions are in the greater-than relation as defined by the matching call to operator>(,).
..signature:SEQAN_ASSERT_GT(expression1, expression2)
..signature:SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_GT(2, 1); // will run through
SEQAN_ASSERT_GT(1, 1); // will not run through
SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_GEQ
..cat:Assertions
..summary:Test that the two given expressions are in the greater-than-or-equal relation as defined by the matching call to operator>=(,).
..signature:SEQAN_ASSERT_GEQ(expression1, expression2)
..signature:SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_GEQ(1, 1); // will run through
SEQAN_ASSERT_GEQ(0, 1); // will not run through
SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_ASSERT_IN_DELTA
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
.Macro.SEQAN_ASSERT_IN_DELTA
..cat:Assertions
..summary:Test that the values of the two given expressions differ by at most the given delta.
..signature:SEQAN_ASSERT_IN_DELTA(x, y, delta)
..signature:SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters])
..remarks:The main advantage of this macro is that it prints the values of its argument on failures. Note that the $operator<<$ to the type of $std::cerr$ has to be defined for the type of both expression parameters. Otherwise, simply use the equivalent @Macro.SEQAN_ASSERT@ call.
..remarks:See @Macro.SEQAN_CHECK@ and @Macro.SEQAN_FAIL@ for (conditionally) aborting your program regardless of debug settings.
..example.code:
SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through
SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail
SEQAN_ASSERT_IN_DELTA(1, "foo", 0.1); // will not compile
SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message
..see:Macro.SEQAN_ASSERT
..see:Macro.SEQAN_ASSERT_NOT
..see:Macro.SEQAN_ASSERT_EQ
..see:Macro.SEQAN_ASSERT_NEQ
..see:Macro.SEQAN_ASSERT_LEQ
..see:Macro.SEQAN_ASSERT_GEQ
..see:Macro.SEQAN_ASSERT_LT
..see:Macro.SEQAN_ASSERT_GT
..see:Macro.SEQAN_CHECK
..see:Macro.SEQAN_FAIL
*/
// Force a test failure.
//
// Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos);
#define SEQAN_ASSERT_FAIL(...) \
    do { \
        ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \
                                      __VA_ARGS__); \
        ::seqan::ClassTest::fail(); \
    } while (false)

// Equality assertion without a comment.
//
// Usage: SEQAN_ASSERT_EQ(4, 4);
#define SEQAN_ASSERT_EQ(_arg1, _arg2) \
    do { \
        if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
                                           (_arg1), # _arg1, \
                                           (_arg2), # _arg2)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Equality assertion with a comment.
//
// Usage: SEQAN_ASSERT_EQ_MSG(4, 4, "values must match");
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \
    do { \
        if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \
                                           (_arg1), # _arg1, \
                                           (_arg2), # _arg2, \
                                           __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// In-delta-environment assertion without a comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1);
#define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \
    do { \
        if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
                                             (_arg1), # _arg1, \
                                             (_arg2), # _arg2, \
                                             (_arg3), # _arg3)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// In-delta-environment assertion with a comment.
//
// Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1");
#define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \
    do { \
        if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \
                                             (_arg1), # _arg1, \
                                             (_arg2), # _arg2, \
                                             (_arg3), # _arg3, \
                                             __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Inequality assertion without a comment.
//
// Usage: SEQAN_ASSERT_NEQ(4, 5);
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) \
    do { \
        if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
                                              (_arg1), # _arg1, \
                                              (_arg2), # _arg2)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Inequality assertion with a comment.
//
// Usage: SEQAN_ASSERT_NEQ_MSG(4, 5, "values must differ");
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \
    do { \
        if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \
                                              (_arg1), # _arg1, \
                                              (_arg2), # _arg2, \
                                              __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Less-than-or-equal assertion without a comment.
//
// Usage: SEQAN_ASSERT_LEQ(4, 5);
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) \
    do { \
        if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
                                         (_arg1), # _arg1, \
                                         (_arg2), # _arg2)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Less-than-or-equal assertion with a comment.
//
// Usage: SEQAN_ASSERT_LEQ_MSG(4, 5, "msg");
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) \
    do { \
        if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \
                                         (_arg1), # _arg1, \
                                         (_arg2), # _arg2, \
                                         __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Less-than assertion without a comment.
//
// Usage: SEQAN_ASSERT_LT(4, 5);
#define SEQAN_ASSERT_LT(_arg1, _arg2) \
    do { \
        if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
                                        (_arg1), # _arg1, \
                                        (_arg2), # _arg2)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Less-than assertion with a comment.
//
// Usage: SEQAN_ASSERT_LT_MSG(4, 5, "msg");
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) \
    do { \
        if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \
                                        (_arg1), # _arg1, \
                                        (_arg2), # _arg2, \
                                        __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Greater-than-or-equal assertion without a comment.
//
// Usage: SEQAN_ASSERT_GEQ(5, 4);
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) \
    do { \
        if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
                                         (_arg1), # _arg1, \
                                         (_arg2), # _arg2)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Greater-than-or-equal assertion with a comment.
//
// Usage: SEQAN_ASSERT_GEQ_MSG(5, 4, "msg");
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \
    do { \
        if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \
                                         (_arg1), # _arg1, \
                                         (_arg2), # _arg2, \
                                         __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Greater-than assertion without a comment.
//
// Usage: SEQAN_ASSERT_GT(5, 4);
#define SEQAN_ASSERT_GT(_arg1, _arg2) \
    do { \
        if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
                                        (_arg1), # _arg1, \
                                        (_arg2), # _arg2)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Greater-than assertion with a comment.
//
// Usage: SEQAN_ASSERT_GT_MSG(5, 4, "msg");
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \
    do { \
        if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \
                                        (_arg1), # _arg1, \
                                        (_arg2), # _arg2, \
                                        __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion without a comment.
//
// Usage: SEQAN_ASSERT(false);
#define SEQAN_ASSERT(_arg1) \
    do { \
        if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
                                          (_arg1), # _arg1)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.;
// Trueness assertion with a comment.
//
// Usage: SEQAN_ASSERT_MSG(false, "msg");
#define SEQAN_ASSERT_MSG(_arg1, ...) \
    do { \
        if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \
                                          (_arg1), # _arg1, \
                                          __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Falseness assertion without a comment.
//
// Usage: SEQAN_ASSERT_NOT(false);
#define SEQAN_ASSERT_NOT(_arg1) \
    do { \
        if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
                                           (_arg1), # _arg1)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)

// Falseness assertion with a comment.
//
// Usage: SEQAN_ASSERT_NOT_MSG(false, "msg");
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \
    do { \
        if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \
                                           (_arg1), # _arg1, \
                                           __VA_ARGS__)) { \
            ::seqan::ClassTest::fail(); \
        } \
    } while (false)
#else // #if SEQAN_ENABLE_DEBUG
// Debugging disabled (SEQAN_ENABLE_DEBUG == 0): all assertion macros
// compile away to empty statements.  Note that the macro arguments are
// NOT evaluated in this mode (unlike the non-variadic function fallbacks).
#define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false)
#define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false)
#define SEQAN_ASSERT(_arg1) do {} while (false)
#define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_NOT(_arg1) do {} while (false)
#define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false)
#define SEQAN_ASSERT_FAIL(...) do {} while (false)
#endif // #if SEQAN_ENABLE_DEBUG
#else // no variadic macros
#if SEQAN_ENABLE_DEBUG
// Fallback for compilers without variadic macros: format the message and
// fail the current test.  No file/line info is available ("" / 0).
inline void SEQAN_ASSERT_FAIL(const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    ::seqan::ClassTest::vforceFail("", 0, comment, args);
    // Release the va_list BEFORE fail(): fail() may throw or abort, which
    // would skip a va_end() placed after it (each va_start must be paired
    // with a va_end on every path).
    va_end(args);
    ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_IN_DELTA; no file/line info is
// available here, so "" and 0 are passed as placeholders.
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3)
{
    if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_IN_DELTA_MSG.
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // Evaluate the check first so va_end() runs before fail(): fail() may
    // throw or abort, which would otherwise skip the mandatory cleanup.
    bool ok = ::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_EQ (no file/line info available).
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_EQ_MSG.
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_NEQ (no file/line info available).
// Pass the 0 line-number placeholder like every sibling assertion helper;
// the original dropped it (testNotEqual("", _arg1, ...)), which does not
// match the (file, line, value, expr, ...) argument pattern used elsewhere.
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testNotEqual("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_NEQ_MSG.  Adds the missing 0
// line-number placeholder (consistent with the other v* helpers) and runs
// va_end() before fail(), which may throw or abort.
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    bool ok = ::seqan::ClassTest::vtestNotEqual("", 0, _arg1, "", _arg2, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_LEQ (no file/line info available).
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_LEQ_MSG.
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_LT (no file/line info available).
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_LT_MSG.
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_GEQ (no file/line info available).
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_GEQ_MSG.
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_GT (no file/line info available).
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2)
{
    if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_GT_MSG.
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT (no file/line info available).
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1)
{
    if (!::seqan::ClassTest::testTrue("", 0, _arg1, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_MSG.
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_NOT (no file/line info available).
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1)
{
    if (!::seqan::ClassTest::testFalse("", 0, _arg1, ""))
        ::seqan::ClassTest::fail();
}
// Non-variadic fallback for SEQAN_ASSERT_NOT_MSG.
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...)
{
    va_list args;
    va_start(args, comment);
    // va_end() must run before fail(), which may throw or abort.
    bool ok = ::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args);
    va_end(args);
    if (!ok)
        ::seqan::ClassTest::fail();
}
#else // #if SEQAN_ENABLE_DEBUG
// Release-mode (SEQAN_ENABLE_DEBUG == 0) stubs for the non-variadic
// fallbacks: they check nothing.  Note that because these are functions,
// the arguments ARE still evaluated, unlike the macro versions above.
inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {}
template <typename T1, typename T2, typename T3>
void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) {}
template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) {}
template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1) {}
template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) {}
#endif // #if SEQAN_ENABLE_DEBUG
#endif // no variadic macros
// Returns a string (of type char*) with the path to the called binary.
//
// Use this to locate files relative to the test binary.
// (Expands to a call of ClassTest::StaticData::basePath().)
#define SEQAN_PROGRAM_PATH \
    ::seqan::ClassTest::StaticData::basePath()
// TODO(holtgrew): Subject to change wiht restructuring.
/**
.Macro.SEQAN_PATH_TO_ROOT
..cat:Testing & Debugging
..summary:Return path to the checkout root directory (i.e. containing core/extras).
..returns:$char const *$, string with the path to the parent directory of the tests directory.
..signature:SEQAN_PATH_TO_ROOT()
..remarks:The pointed to string is initialized on program startup by the code generated by @Macro.SEQAN_BEGIN_TESTSUITE@.
..example.code:
const char *p = SEQAN_PATH_TO_ROOT();
char buffer[1000];
strcpy(buffer, p);
strcat(buffer, "/tests/files/example.txt");
FILE *f = fopen(buffer, "w");
fprintf(f, "Test Data");
fclose(f);
..see:Macro.SEQAN_TEMP_FILENAME
*/
// Returns a const char * string with the path to the projects directory.
// Initialized on program startup by SEQAN_BEGIN_TESTSUITE (see the
// documentation block above).
#define SEQAN_PATH_TO_ROOT() \
    ::seqan::ClassTest::StaticData::pathToRoot()
// Returns the POSIX int file handle to an open file.
// TODO(holtgrewe): Uncomment if openTempFile has been implemented.
// #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile())
/**
.Macro.SEQAN_TEMP_FILENAME
..cat:Testing & Debugging
..summary:Generates the name to a temporary file.
..returns:$char const *$, string with the path to a temporary file.
..signature:SEQAN_TEMP_FILENAME()
..remarks:The pointed to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you need it.
..example.code:
const char *p = SEQAN_TEMP_FILENAME();
char tempFilename[1000];
strcpy(tempFilename, p);
FILE *f = fopen(tempFilename, "w");
fprintf(f, "Test Data");
fclose(f);
..see:Macro.SEQAN_PATH_TO_ROOT
*/
// Returns a temporary filename.  Per the documentation above, the buffer
// behind the returned pointer is overwritten by the next call, so copy the
// string out if it must outlive it.
#define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName())
/**
.Macro.SEQAN_VERIFY_CHECKPOINTS
..cat:Testing & Debugging
..summary:Verify check points for the given file name.
..signature:SEQAN_VERIFY_CHECKPOINTS(path)
..param.path:Path to the file to verify check points for. Relative to parent directory of tests.
..example.code:
SEQAN_VERIFY_CHECKPOINTS("core/include/seqan/basic_alphabet.h");
..see:Macro.SEQAN_CHECKPOINT
.Macro.SEQAN_CHECKPOINT
..cat:Testing & Debugging
..summary:Generate a check point.
..signature:SEQAN_CHECKPOINT
..remarks:Whenever the code executes the instructions generated by this macro, the check point for this line will be set in global testing state. Use @Macro.SEQAN_VERIFY_CHECKPOINTS@ to verify whether all checkpoints have been reached in a file up to this point.
SEQAN_CHECKPOINT;
..see:Macro.SEQAN_VERIFY_CHECKPOINTS
*/
#if SEQAN_ENABLE_CHECKPOINTS
// Create a check point at the point where the macro is placed.
// TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent.
#define SEQAN_CHECKPOINT \
    ::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__);
// Call the check point verification code for the given file.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
    ::seqan::ClassTest::verifyCheckPoints(filename)
#else  // #if SEQAN_ENABLE_CHECKPOINTS
// Check points disabled: SEQAN_CHECKPOINT expands to nothing.
#define SEQAN_CHECKPOINT
// If checkpoint verification is requested while check points are
// disabled then print a warning.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
    do { \
        fprintf(stderr, ("WARNING: Check point verification is " \
                         "disabled. Trying to verify %s from %s:%d.\n"), \
                filename, __FILE__, __LINE__); \
    } while (false)
#endif  // #if SEQAN_ENABLE_CHECKPOINTS
#if !SEQAN_ENABLE_TESTING
// Fallbacks when testing is disabled: SEQAN_BEGIN_TESTSUITE still opens
// main() but prints a warning, SEQAN_CALL_TEST calls the test without the
// try/catch harness, and SEQAN_SKIP_TEST is a no-op.
#define SEQAN_BEGIN_TESTSUITE(suite_name) \
    int main(int argc, char ** argv) { \
        (void) argc; \
        (void) argv; \
        fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n");
#define SEQAN_END_TESTSUITE \
    return 0; \
    }
#define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false)
#define SEQAN_SKIP_TEST do {} while (false)
#endif  // #if !SEQAN_ENABLE_TESTING
} // namespace seqan
#endif // SEQAN_CORE_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
|
nn_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef FLANN_NNINDEX_H
#define FLANN_NNINDEX_H
#include <vector>
#include "flann/general.h"
#include "flann/util/matrix.h"
#include "flann/util/params.h"
#include "flann/util/result_set.h"
#include "flann/util/dynamic_bitset.h"
#include "flann/util/saving.h"
namespace flann
{
#define KNN_HEAP_THRESHOLD 250
/**
 * Distance-type-independent abstract interface implemented by all indexes.
 */
class IndexBase
{
public:
    virtual ~IndexBase() {};

    /** @return dimensionality of the indexed feature vectors */
    virtual size_t veclen() const = 0;
    /** @return number of points in the index */
    virtual size_t size() const = 0;
    /** @return algorithm type identifier of the concrete index */
    virtual flann_algorithm_t getType() const = 0;
    /** @return memory used by the index, in bytes (per concrete impl.) */
    virtual int usedMemory() const = 0;
    /** @return parameters the index was built with */
    virtual IndexParams getParameters() const = 0;

    /** Load the index state from an open stream. */
    virtual void loadIndex(FILE* stream) = 0;
    /** Save the index state to an open stream. */
    virtual void saveIndex(FILE* stream) = 0;
};
/**
* Nearest-neighbour index base class
*/
template <typename Distance>
class NNIndex : public IndexBase
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
// Construct an empty index using distance functor d; all counters and the
// owned data pointer start out zeroed/NULL.
NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
    removed_(false), removed_count_(0), data_ptr_(NULL)
{
}

// Same as above, additionally storing the index parameters.
NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0),
    index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL)
{
}
// Copy constructor.  Copies all bookkeeping; if the source owns its dataset
// buffer (data_ptr_ != NULL), the buffer is deep-copied and points_ is
// re-pointed into the new buffer so the copies share no storage.
NNIndex(const NNIndex& other) :
    distance_(other.distance_),
    last_id_(other.last_id_),
    size_(other.size_),
    size_at_build_(other.size_at_build_),
    veclen_(other.veclen_),
    index_params_(other.index_params_),
    removed_(other.removed_),
    removed_points_(other.removed_points_),
    removed_count_(other.removed_count_),
    ids_(other.ids_),
    points_(other.points_),
    data_ptr_(NULL)
{
    if (other.data_ptr_) {
        data_ptr_ = new ElementType[size_*veclen_];
        std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_);
        // Fix up the row pointers to reference the freshly copied buffer.
        for (size_t i=0;i<size_;++i) {
            points_[i] = data_ptr_ + i*veclen_;
        }
    }
}
// Releases the dataset buffer if this index owns one.
virtual ~NNIndex()
{
    // delete[] of a null pointer is a well-defined no-op, so the
    // previous `if (data_ptr_)` guard was redundant.
    delete[] data_ptr_;
}

// Polymorphic copy; implemented by each concrete index type.
virtual NNIndex* clone() const = 0;
/**
 * Builds the index from the dataset that has already been set.
 * Releases any previous index structure and compacts removed points first.
 */
virtual void buildIndex()
{
    freeIndex();
    cleanRemovedPoints();

    // building index
    buildIndexImpl();

    // Remember how many points the structure was built over (used by
    // incremental-add heuristics in derived classes).
    size_at_build_ = size_;
}

/**
 * Builds the index using the specified dataset
 * @param dataset the dataset to use
 */
virtual void buildIndex(const Matrix<ElementType>& dataset)
{
    setDataset(dataset);
    this->buildIndex();
}
/**
 * @brief Incrementally add points to the index.
 * @param points Matrix with points to be added
 * @param rebuild_threshold
 *
 * Base-class default: incremental insertion is not supported by every
 * index type, so this simply throws.
 */
virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
    throw FLANNException("Functionality not supported by this index");
}

/**
 * Remove point from the index
 * @param id id of the point to be removed
 */
virtual void removePoint(size_t id)
{
    // Lazily set up removal bookkeeping on the first removal: ids start
    // out as the identity mapping and no point is marked removed.
    if (!removed_) {
        ids_.resize(size_);
        for (size_t i=0;i<size_;++i) {
            ids_[i] = i;
        }
        removed_points_.resize(size_);
        removed_points_.reset();
        last_id_ = size_;
        removed_ = true;
    }

    // Soft delete: only mark the point; its storage is not reclaimed here.
    size_t point_index = id_to_index(id);
    if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
        removed_points_.set(point_index);
        removed_count_++;
    }
}
/**
 * Look up a stored point by its id.
 * @param id id of the point
 * @return pointer to the point's data, or NULL if the id is unknown
 */
virtual ElementType* getPoint(size_t id)
{
    const size_t idx = id_to_index(id);
    if (idx == size_t(-1))
        return NULL;
    return points_[idx];
}
/**
 * @return number of features in this index (points marked as removed
 * are excluded from the count).
 */
inline size_t size() const
{
    return size_ - removed_count_;
}

/**
 * @return The dimensionality of the features in this index.
 */
inline size_t veclen() const
{
    return veclen_;
}

/**
 * Returns the parameters used by the index.
 *
 * @return The index parameters
 */
IndexParams getParameters() const
{
    return index_params_;
}
/**
 * Bidirectional (de)serialization of the index base state.
 * The same member-streaming order is used for saving and loading, so the
 * sequence of `ar &` statements below must never be reordered.
 * On load, header sanity checks reject files with a wrong signature,
 * element datatype or index type.
 * @param ar archive to read from or write to (direction given by
 *           Archive::is_saving / Archive::is_loading)
 */
template<typename Archive>
void serialize(Archive& ar)
{
IndexHeader header;
if (Archive::is_saving::value) {
header.data_type = flann_datatype_value<ElementType>::value;
header.index_type = getType();
header.rows = size_;
header.cols = veclen_;
}
ar & header;
// sanity checks
if (Archive::is_loading::value) {
if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) {
throw FLANNException("Invalid index file, wrong signature");
}
if (header.data_type != flann_datatype_value<ElementType>::value) {
throw FLANNException("Datatype of saved index is different than of the one to be created.");
}
if (header.index_type != getType()) {
throw FLANNException("Saved index type is different then the current index type.");
}
// TODO: check for distance type
}
ar & size_;
ar & veclen_;
ar & size_at_build_;
// whether the raw dataset follows the header in the stream
bool save_dataset;
if (Archive::is_saving::value) {
save_dataset = get_param(index_params_,"save_dataset", false);
}
ar & save_dataset;
if (save_dataset) {
if (Archive::is_loading::value) {
// loading: take ownership of fresh storage and rebuild the row pointers
if (data_ptr_) {
delete[] data_ptr_;
}
data_ptr_ = new ElementType[size_*veclen_];
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = data_ptr_ + i*veclen_;
}
}
// stream each point's raw bytes (both directions)
for (size_t i=0;i<size_;++i) {
ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
}
} else {
// dataset not in the stream: the caller must have supplied one already
if (points_.size()!=size_) {
throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
}
}
ar & last_id_;
ar & ids_;
ar & removed_;
if (removed_) {
ar & removed_points_;
}
ar & removed_count_;
}
/**
 * @brief Perform k-nearest neighbor search
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The ids of the nearest neighbors found (one row per query)
 * @param[out] dists Distances to the nearest neighbors found
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters (cores, sorted, use_heap, ...)
 * @return total number of neighbors found over all queries
 *
 * Queries are processed in parallel with OpenMP; each thread keeps its own
 * result set and the per-query counts are combined with a reduction.
 */
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
// heap-based result set is preferred for large knn unless the caller forces a choice
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
// one result set per thread, reused across its queries
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
// translate internal indices to persistent ids (in place)
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
return count;
}
/**
 * k-nearest neighbor search with an int index matrix: adapter over the
 * size_t overload, narrowing the resulting ids afterwards.
 *
 * The temporary id buffer is held in a std::vector instead of a raw
 * new[]/delete[] pair, so it is released even if the underlying search
 * throws (the previous version leaked on exceptions).
 *
 * @param queries query points
 * @param indices output matrix of neighbor ids (int)
 * @param dists output matrix of neighbor distances
 * @param knn number of neighbors to return
 * @param params search parameters
 * @return total number of neighbors found
 */
int knnSearch(const Matrix<ElementType>& queries,
    Matrix<int>& indices,
    Matrix<DistanceType>& dists,
    size_t knn,
    const SearchParams& params) const
{
    // RAII buffer backing the size_t view of the caller's index matrix
    std::vector<size_t> buffer(indices.rows*indices.cols);
    flann::Matrix<size_t> indices_(buffer.empty() ? NULL : &buffer[0], indices.rows, indices.cols);
    int result = knnSearch(queries, indices_, dists, knn, params);
    // narrow the size_t ids into the caller's int matrix
    for (size_t i=0;i<indices.rows;++i) {
        for (size_t j=0;j<indices.cols;++j) {
            indices[i][j] = indices_[i][j];
        }
    }
    return result;
}
/**
 * @brief Perform k-nearest neighbor search with per-query result vectors.
 * Output vectors are resized per query to the number of neighbors actually
 * found (at most knn).
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The ids of the nearest neighbors found
 * @param[out] dists Distances to the nearest neighbors found
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters
 * @return total number of neighbors found over all queries
 */
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
// heap-based result set preferred for large knn unless forced by caller
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
// make sure there is one output slot per query
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
int count = 0;
if (use_heap) {
#pragma omp parallel num_threads(params.cores)
{
// one result set per thread, reused across its queries
KNNResultSet2<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
// translate internal indices to persistent ids (in place)
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
return count;
}
/**
*
* @param queries
* @param indices
* @param dists
* @param knn
* @param params
* @return
*/
int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = knnSearch(queries, indices_, dists, knn, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
/**
 * @brief Perform radius search
 * Three strategies depending on how many neighbors may be stored:
 * count-only (no room at all), unbounded collection (room for the whole
 * index), or a bounded k-best-within-radius collection.
 * @param[in] queries The query points
 * @param[out] indices The ids of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found (total, possibly more than were stored)
 */
int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// at most this many neighbors fit in both output matrices
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
if (max_neighbors==0) {
// caller only wants counts, nothing is stored
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=size())) {
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
// translate internal indices to persistent ids (in place)
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
return count;
}
/**
 * Radius search with an int index matrix: adapter over the size_t
 * overload, narrowing the resulting ids afterwards.
 *
 * The temporary id buffer is held in a std::vector instead of a raw
 * new[]/delete[] pair, so it is released even if the underlying search
 * throws (the previous version leaked on exceptions).
 *
 * @param queries query points
 * @param indices output matrix of neighbor ids (int)
 * @param dists output matrix of neighbor distances
 * @param radius search radius
 * @param params search parameters
 * @return number of neighbors found
 */
int radiusSearch(const Matrix<ElementType>& queries,
    Matrix<int>& indices,
    Matrix<DistanceType>& dists,
    float radius,
    const SearchParams& params) const
{
    // RAII buffer backing the size_t view of the caller's index matrix
    std::vector<size_t> buffer(indices.rows*indices.cols);
    flann::Matrix<size_t> indices_(buffer.empty() ? NULL : &buffer[0], indices.rows, indices.cols);
    int result = radiusSearch(queries, indices_, dists, radius, params);
    // narrow the size_t ids into the caller's int matrix
    for (size_t i=0;i<indices.rows;++i) {
        for (size_t j=0;j<indices.cols;++j) {
            indices[i][j] = indices_[i][j];
        }
    }
    return result;
}
/**
 * @brief Perform radius search with per-query result vectors.
 * Output vectors are resized per query to the number of neighbors stored
 * (bounded by params.max_neighbors when it is non-negative).
 * @param[in] queries The query points
 * @param[out] indices The ids of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found (total, possibly more than were stored)
 */
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// just count neighbors
if (params.max_neighbors==0) {
#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
count += resultSet.size();
}
}
}
else {
// make sure there is one output slot per query
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
// translate internal indices to persistent ids (in place)
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
return count;
}
/**
*
* @param queries
* @param indices
* @param dists
* @param radius
* @param params
* @return
*/
int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
std::vector<std::vector<size_t> > indices_;
int result = radiusSearch(queries, indices_, dists, radius, params);
indices.resize(indices_.size());
for (size_t i=0;i<indices_.size();++i) {
indices[i].assign(indices_[i].begin(), indices_[i].end());
}
return result;
}
/**
 * Core search primitive implemented by each index type: accumulates the
 * neighbors of `vec` into `result` according to `searchParams`.
 */
virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0;
protected:
// Releases structures built by buildIndexImpl() (called before a rebuild).
virtual void freeIndex() = 0;
// Subclass hook performing the actual index construction.
virtual void buildIndexImpl() = 0;
/**
 * Map a persistent point id to its current internal index.
 *
 * ids_ stays empty until the first removal, during which time ids and
 * indices coincide.  Afterwards ids_ is kept sorted ascending (ids are
 * assigned sequentially and cleanRemovedPoints() preserves order), so the
 * hand-rolled binary search is replaced with std::lower_bound.
 *
 * @param id point id
 * @return internal index of the point, or size_t(-1) if the id is unknown
 */
size_t id_to_index(size_t id)
{
    // no removals yet: identity mapping
    if (ids_.size()==0) {
        return id;
    }
    // fast path: as long as no point before position `id` was compacted
    // away, ids_[id] == id still holds
    if (id < ids_.size() && ids_[id]==id) {
        return id;
    }
    // general case: binary search in the sorted id list
    std::vector<size_t>::const_iterator it = std::lower_bound(ids_.begin(), ids_.end(), id);
    if (it != ids_.end() && *it == id) {
        return size_t(it - ids_.begin());
    }
    return size_t(-1);
}
/**
 * Translate internal point indices into persistent point ids.
 * Before any removal index == id, so the buffers are left untouched.
 * `in` and `out` may alias (in-place translation is used by the search
 * methods).
 */
void indices_to_ids(const size_t* in, size_t* out, size_t size) const
{
    if (!removed_) return;
    for (size_t k = 0; k < size; ++k) {
        out[k] = ids_[in[k]];
    }
}
/**
 * Replace the indexed dataset and reset all removal bookkeeping.
 * Only row pointers into the caller's matrix are stored — no copy of the
 * data is made here, so `dataset` must outlive the index (or the index
 * must own a copy via data_ptr_).
 */
void setDataset(const Matrix<ElementType>& dataset)
{
size_ = dataset.rows;
veclen_ = dataset.cols;
last_id_ = 0;
// clear removal state: a fresh dataset has no removed points
ids_.clear();
removed_points_.clear();
removed_ = false;
removed_count_ = 0;
// store borrowed pointers to each row of the dataset
points_.resize(size_);
for (size_t i=0;i<size_;++i) {
points_[i] = dataset[i];
}
}
/**
 * Append the rows of `new_points` to the indexed dataset (borrowed
 * pointers; the matrix must outlive the index).  When removal bookkeeping
 * is active, each new point gets a fresh sequential id and is marked as
 * not removed.
 */
void extendDataset(const Matrix<ElementType>& new_points)
{
size_t new_size = size_ + new_points.rows;
if (removed_) {
removed_points_.resize(new_size);
ids_.resize(new_size);
}
points_.resize(new_size);
for (size_t i=size_;i<new_size;++i) {
points_[i] = new_points[i-size_];
if (removed_) {
// assign the next sequential id and clear the removed flag
ids_[i] = last_id_++;
removed_points_.reset(i);
}
}
size_ = new_size;
}
/**
 * Compact the point/id arrays by physically dropping removed points.
 * Surviving entries keep their relative order (so ids_ stays sorted),
 * the removal bitset is cleared for the compacted range, and all the
 * bookkeeping sizes are shrunk to the surviving count.
 * No-op while nothing has been removed.
 */
void cleanRemovedPoints()
{
if (!removed_) return;
size_t last_idx = 0;
// stable in-place compaction: copy each survivor down to last_idx
for (size_t i=0;i<size_;++i) {
if (!removed_points_.test(i)) {
points_[last_idx] = points_[i];
ids_[last_idx] = ids_[i];
removed_points_.reset(last_idx);
++last_idx;
}
}
points_.resize(last_idx);
ids_.resize(last_idx);
removed_points_.resize(last_idx);
size_ = last_idx;
removed_count_ = 0;
}
/**
 * Exchange the complete base-class state with another index.
 * Every data member is swapped (used e.g. to implement copy-and-swap
 * style assignment in subclasses — confirm at call sites).
 */
void swap(NNIndex& other)
{
std::swap(distance_, other.distance_);
std::swap(last_id_, other.last_id_);
std::swap(size_, other.size_);
std::swap(size_at_build_, other.size_at_build_);
std::swap(veclen_, other.veclen_);
std::swap(index_params_, other.index_params_);
std::swap(removed_, other.removed_);
std::swap(removed_points_, other.removed_points_);
std::swap(removed_count_, other.removed_count_);
std::swap(ids_, other.ids_);
std::swap(points_, other.points_);
std::swap(data_ptr_, other.data_ptr_);
}
protected:
/**
* The distance functor
*/
Distance distance_;
/**
* Each index point has an associated ID. IDs are assigned sequentially in
* increasing order. This indicates the ID assigned to the last point added to the
* index.
*/
size_t last_id_;
/**
* Number of points in the index (and database)
*/
size_t size_;
/**
* Number of features in the dataset when the index was last built.
*/
size_t size_at_build_;
/**
* Size of one point in the index (and database)
*/
size_t veclen_;
/**
* Parameters of the index.
*/
IndexParams index_params_;
/**
* Flag indicating if at least a point was removed from the index
*/
bool removed_;
/**
* Array used to mark points removed from the index
*/
DynamicBitset removed_points_;
/**
* Number of points removed from the index
*/
size_t removed_count_;
/**
* Array of point IDs, returned by nearest-neighbour operations.
* Empty until the first removal; kept sorted ascending afterwards.
*/
std::vector<size_t> ids_;
/**
* Point data: one pointer per row, either borrowed from the caller's
* dataset (setDataset/extendDataset) or pointing into data_ptr_ when the
* index owns the storage.
*/
std::vector<ElementType*> points_;
/**
* Pointer to dataset memory if allocated by this index, otherwise NULL
*/
ElementType* data_ptr_;
};
// Convenience macro for derived index class templates: re-declares the
// dependent base-class members with using-declarations so they can be
// referenced without this-> or NNIndex<Distance>:: qualification.
#define USING_BASECLASS_SYMBOLS \
using NNIndex<Distance>::distance_;\
using NNIndex<Distance>::size_;\
using NNIndex<Distance>::size_at_build_;\
using NNIndex<Distance>::veclen_;\
using NNIndex<Distance>::index_params_;\
using NNIndex<Distance>::removed_points_;\
using NNIndex<Distance>::ids_;\
using NNIndex<Distance>::removed_;\
using NNIndex<Distance>::points_;\
using NNIndex<Distance>::extendDataset;\
using NNIndex<Distance>::setDataset;\
using NNIndex<Distance>::cleanRemovedPoints;\
using NNIndex<Distance>::indices_to_ids;
}
#endif //FLANN_NNINDEX_H
|
1.c | // Contributed by Jeremy Zerfas
// This controls the initial size used for the hash tables. This needs to be a power of two because a mask is also
// calculated from this by using INITIAL_HASH_TABLE_SIZE-1.
#define INITIAL_HASH_TABLE_SIZE 16
// This controls the maximum length for each set of oligonucleotide frequencies and each oligonucleotide count output by
// this program.
#define MAXIMUM_OUTPUT_LENGTH 4096
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
//******************************************
//*** Start of hash table implementation ***
//******************************************
// In order to prevent too many collisions from occurring the hash table is grown when it is filled to a certain
// percentage. This value sets the percentage that controls when growing should occur. This value must be set as a
// fraction between 0 and 1 but sane values are generally around 3/4. Also do NOT place this value in parentheses since
// it would just perform integer division and evaluate as zero, the value needs to be multiplied by another value first
// before the division is performed in order to generate useful values. Setting the value too low causes the hash table
// to be made larger than it needs to be which reduces the effectiveness of caches and setting it too high will cause a
// large amount of collisions.
#define HASH_TABLE_LOAD_LIMIT 12/16
// One open-addressing slot: the table resolves collisions by linear probing
// (see grow_Hash_Table/find_Or_Add_Element_For_Key).
typedef struct element{
#define EMPTY_VALUE_KEY -1
int64_t key; // If key is negative, then this element is empty, otherwise key and value contain the unmodified key
// and value.
int32_t value;
} element;
typedef struct hash_table{
intnative_t size; // The current capacity of the hash table. Never will actually be reached since the hash
// table will be grown first when it reaches element_Limit.
int64_t key_Mask; // ANDed with keys to make sure that hash table indices do not exceed the size of the
// hash table. Valid because size is always a power of two.
intnative_t element_Limit; // Controls the maximum amount of elements that are allowed in the hash table before it
// will be grown.
intnative_t element_Count; // The current amount of elements in the hash table.
element * elements;
} hash_table;
// Allocate and initialize a hash table with capacity for requested_Size
// elements.  requested_Size must be a power of two since the key mask is
// computed as requested_Size-1.
hash_table * create_Hash_Table(intnative_t requested_Size){
    hash_table * table = malloc(sizeof(hash_table));

    // Fill in the table properties.
    table->size = requested_Size;
    table->key_Mask = requested_Size - 1;
    table->element_Limit = requested_Size * HASH_TABLE_LOAD_LIMIT;
    table->element_Count = 0;
    table->elements = malloc(requested_Size * sizeof(element));

    // Mark every slot empty (negative key) with a zero count.
    for(intnative_t i = 0; i < requested_Size; i++)
        table->elements[i] = (element){EMPTY_VALUE_KEY, 0};

    return table;
}
// Destroy hash table pointed to by hash_Table_To_Destroy and all of its elements.
// The elements array must be freed before the table struct that owns it.
void destroy_Hash_Table(hash_table * hash_Table_To_Destroy){
free(hash_Table_To_Destroy->elements);
free(hash_Table_To_Destroy);
}
// Hash function used to hash keys: mixes the low bits with bits 7 and up.
// The argument is fully parenthesized so the macro also expands correctly
// for expression arguments (the previous definition misparsed e.g.
// hash_Key(a ^ b) because ^ binds looser than >>).
#define hash_Key(key) ((key) ^ ((key)>>7))
// Grow hash_Table_To_Grow by quadrupling it in size. A new elements array is created, the existing elements are
// inserted into the new elements array, the old elements array is deleted, and the properties for hash_Table_To_Grow
// are updated.
void grow_Hash_Table(hash_table * hash_Table_To_Grow){
intnative_t old_Hash_Table_Size=hash_Table_To_Grow->size;
intnative_t new_Hash_Table_Size=old_Hash_Table_Size*4;
// Keep a reference to old_Hash_Table_Elements and allocate space for new_Hash_Table_Elements.
element * old_Hash_Table_Elements=hash_Table_To_Grow->elements;
element * new_Hash_Table_Elements=malloc(new_Hash_Table_Size*sizeof(element));
// Update the properties for the hash_Table_To_Grow (element_Count is unchanged
// by rehashing, so it is deliberately not touched here).
hash_Table_To_Grow->size=new_Hash_Table_Size;
hash_Table_To_Grow->key_Mask=new_Hash_Table_Size-1;
hash_Table_To_Grow->element_Limit=new_Hash_Table_Size*HASH_TABLE_LOAD_LIMIT;
hash_Table_To_Grow->elements=new_Hash_Table_Elements;
// Initialize all elements in new_Hash_Table_Elements to have initial keys set to EMPTY_VALUE_KEY and values set to
// 0.
for(intnative_t i=0; i<new_Hash_Table_Size; i++)
new_Hash_Table_Elements[i]=(element){EMPTY_VALUE_KEY, 0};
// Copy all old_Hash_Table_Elements to new_Hash_Table_Elements. This code is simpler and faster than using the
// find_Or_Add_Element_For_Key() function since we don't need to worry about updating element_Count and checking to
// see if we have reached element_Limit.
for(intnative_t i=0; i<old_Hash_Table_Size; i++){
// only occupied slots (non-negative keys) are rehashed
if(old_Hash_Table_Elements[i].key>=0){
int64_t elements_Index=hash_Key(old_Hash_Table_Elements[i].key) & hash_Table_To_Grow->key_Mask;
// Find the first free spot in new_Hash_Table_Elements (linear probing with
// wraparound via the key mask) and copy the old element to it.
while(new_Hash_Table_Elements[elements_Index].key>=0){
elements_Index++;
elements_Index&=hash_Table_To_Grow->key_Mask;
}
new_Hash_Table_Elements[elements_Index]=old_Hash_Table_Elements[i];
}
}
free(old_Hash_Table_Elements);
}
// See if key is already in hash_Table and if so then return the element for it, otherwise add the key to hash_table
// (and grow it if necessary) and return the element for it. key must be non-negative
// (negative keys mark empty slots).
element * find_Or_Add_Element_For_Key(hash_table * hash_Table, int64_t key){
int64_t elements_Index=hash_Key(key) & hash_Table->key_Mask;
// Search hash_Table for key (linear probing with wraparound via the key mask).
element * elements=hash_Table->elements;
while(elements[elements_Index].key!=key){
// If we reach a key with a negative value then that means that key is not in hash_Table so we will go ahead and
// add it.
if(elements[elements_Index].key<0){
// If we're at the hash table's load limit then grow the hash table and call this function a second time to
// add and return an item. The recursion terminates because growing leaves the
// table well below its new load limit.
if(hash_Table->element_Count>=hash_Table->element_Limit){
grow_Hash_Table(hash_Table);
return find_Or_Add_Element_For_Key(hash_Table, key);
}
// Set the key for this element to key, increment element_Count, and break out of the loop so that this
// element will be returned.
elements[elements_Index].key=key;
hash_Table->element_Count++;
break;
}
// Still haven't found key or a free spot so continue to the next index.
elements_Index++;
elements_Index&=hash_Table->key_Mask;
}
return &elements[elements_Index];
}
//******************************************
//*** End of hash table implementation ***
//******************************************
// Macro to convert a nucleotide character to a 2-bit code. Note that upper and lower case
// ASCII letters only differ in the fifth bit from the right and we only need the three
// least significant bits to differentiate the letters 'A', 'C', 'G', and 'T':
// 'A'&7==1, 'C'&7==3, 'T'&7==4, 'G'&7==7, so the lookup string must map
// index 1->0, 3->1, 4->3 and 7->2 (matching "ACGT" order below). The string
// needs two spaces before \2 so that '\2' sits at index 7; with only one
// space, 'G' would read the string's NUL terminator and collide with 'A'.
// Arguments are parenthesized for safe expansion with expression arguments.
// Spaces in this string are never used as long as only 'A', 'C', 'G', 'T' appear.
#define code_For_Nucleotide(nucleotide) (" \0 \1\3  \2"[(nucleotide) & 0b111])
// And one more macro to convert the codes back to nucleotide characters.
#define nucleotide_For_Code(code) ("ACGT"[(code) & 0b11])
// qsort() comparator: elements with larger values come first; ties are broken
// by putting elements with smaller keys first. Keys are unique in practice,
// so 0 is never returned.
int element_Compare(const void * uncasted_Left_Element, const void * uncasted_Right_Element){
    const element * left = uncasted_Left_Element;
    const element * right = uncasted_Right_Element;

    // Primary order: descending by value.
    if(left->value != right->value)
        return (left->value < right->value) ? 1 : -1;

    // Secondary order: ascending by key.
    return (left->key > right->key) ? 1 : -1;
}
// Generate frequencies for all oligonucleotides in polynucleotide that are of desired_Length_For_Oligonucleotides and
// then save them to output. polynucleotide must already be encoded as 2-bit codes
// (see code_For_Nucleotide); output must hold MAXIMUM_OUTPUT_LENGTH bytes.
void generate_Frequencies_For_Desired_Length_Oligonucleotides(char * polynucleotide, intnative_t polynucleotide_Length
, intnative_t desired_Length_For_Oligonucleotides, char * output){
hash_table * hash_Table=create_Hash_Table(INITIAL_HASH_TABLE_SIZE);
// Add all the complete oligonucleotides of desired_Length_For_Oligonucleotides to hash_Table.
// A sliding 2-bit-per-nucleotide window is kept in `code`; the mask trims it
// to the desired window length.
int64_t code=0;
for(intnative_t i=0; i<polynucleotide_Length; i++){
int64_t mask=((int64_t)1<<2*desired_Length_For_Oligonucleotides)-1;
code=(code<<2 & mask) | polynucleotide[i];
// the first complete window ends at index desired_Length_For_Oligonucleotides-1
if(i>=desired_Length_For_Oligonucleotides-1)
find_Or_Add_Element_For_Key(hash_Table, code)->value++;
}
// Create an array of elements from hash_Table (occupied slots only).
intnative_t elements_Array_Size=hash_Table->element_Count;
element * elements_Array=malloc(elements_Array_Size*sizeof(element));
for(intnative_t i=0, j=0; i<hash_Table->size; i++){
if(hash_Table->elements[i].key>=0){
elements_Array[j].key=hash_Table->elements[i].key;
elements_Array[j].value=hash_Table->elements[i].value;
j++;
}
}
destroy_Hash_Table(hash_Table);
// Sort elements_Array (descending count, then ascending key).
qsort(elements_Array, elements_Array_Size, sizeof(element), &element_Compare);
// Calculate the total_Count of all elements.
intnative_t total_Count=0;
for(intnative_t i=0; i<elements_Array_Size; i++)
total_Count+=elements_Array[i].value;
// Print the frequencies for each element.
for(intnative_t output_Position=0, i=0; i<elements_Array_Size; i++){
// Decode key back into a oligonucleotide (note: destroys the key in place).
char oligonucleotide[desired_Length_For_Oligonucleotides+1];
for(intnative_t j=desired_Length_For_Oligonucleotides-1; j>-1; j--){
oligonucleotide[j]=nucleotide_For_Code(elements_Array[i].key);
elements_Array[i].key>>=2;
}
oligonucleotide[desired_Length_For_Oligonucleotides]='\0';
// Output the frequency for oligonucleotide to output.
output_Position+=snprintf(output+output_Position, MAXIMUM_OUTPUT_LENGTH-output_Position, "%s %.3f\n"
, oligonucleotide, 100.0f*elements_Array[i].value/total_Count);
}
free(elements_Array);
}
// Generate a count for the number of times oligonucleotide appears in
// polynucleotide and then save it to output. polynucleotide must already be
// encoded as 2-bit codes; output must hold MAXIMUM_OUTPUT_LENGTH bytes.
void generate_Count_For_Oligonucleotide(char * polynucleotide, intnative_t polynucleotide_Length, char * oligonucleotide
  , char * output){
   intnative_t oligonucleotide_Length=strlen(oligonucleotide);

   hash_table * hash_Table=create_Hash_Table(INITIAL_HASH_TABLE_SIZE);

   // Slide a 2-bit-per-nucleotide window over the polynucleotide and count
   // every complete window of oligonucleotide_Length nucleotides.
   int64_t key=0;
   for(intnative_t i=0; i<polynucleotide_Length; i++){
      int64_t mask=((int64_t)1<<2*oligonucleotide_Length)-1;
      key=(key<<2 & mask) | polynucleotide[i];

      // The first complete window ends at index oligonucleotide_Length-1;
      // the previous `i>=oligonucleotide_Length` test skipped it and
      // undercounted when the sequence starts with a match.  This matches
      // the `-1` convention in
      // generate_Frequencies_For_Desired_Length_Oligonucleotides().
      if(i>=oligonucleotide_Length-1)
         find_Or_Add_Element_For_Key(hash_Table, key)->value++;
   }

   // Generate the lookup key for the requested oligonucleotide.
   key=0;
   for(intnative_t i=0; i<oligonucleotide_Length; i++)
      key=(key<<2) | code_For_Nucleotide(oligonucleotide[i]);

   // Output the count for oligonucleotide to output.
   intnative_t count=find_Or_Add_Element_For_Key(hash_Table, key)->value;
   snprintf(output, MAXIMUM_OUTPUT_LENGTH, "%jd\t%s", (intmax_t)count, oligonucleotide);

   destroy_Hash_Table(hash_Table);
}
int main(int argc, char * argv[]){
char buffer[4096];
// Open the file that was specified as a command line argument.
FILE * input_File=fopen(argv[1], "r");
// Find the start of the third polynucleotide.
while(fgets(buffer, sizeof(buffer), input_File) && memcmp(">THREE", buffer, sizeof(">THREE")-1));
// Start with 64 KiB of storage for reading in the polynucleotide and grow geometrically.
intnative_t polynucleotide_Capacity=65536;
intnative_t polynucleotide_Length=0;
char * polynucleotide=malloc(polynucleotide_Capacity);
// Start reading and encoding the third polynucleotide.
while(fgets(buffer, sizeof(buffer), input_File) && buffer[0]!='>'){
for(intnative_t i=0; buffer[i]!='\0'; i++)
if(buffer[i]!='\n')
polynucleotide[polynucleotide_Length++]=code_For_Nucleotide(buffer[i]);
// Make sure we still have enough memory allocated for any potential nucleotides in the next line.
if(polynucleotide_Capacity-polynucleotide_Length < sizeof(buffer))
polynucleotide=realloc(polynucleotide, polynucleotide_Capacity*=2);
}
// Free up any leftover memory.
polynucleotide=realloc(polynucleotide, polynucleotide_Length);
char output_Buffer[7][MAXIMUM_OUTPUT_LENGTH];
// Do the following functions in parallel.
#pragma omp parallel sections
{
#pragma omp section
generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide, polynucleotide_Length, 1
, output_Buffer[0]);
#pragma omp section
generate_Frequencies_For_Desired_Length_Oligonucleotides(polynucleotide, polynucleotide_Length, 2
, output_Buffer[1]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGT", output_Buffer[2]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTA", output_Buffer[3]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTATT", output_Buffer[4]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTATTTTAATT", output_Buffer[5]);
#pragma omp section
generate_Count_For_Oligonucleotide(polynucleotide, polynucleotide_Length, "GGTATTTTAATTTATAGT"
, output_Buffer[6]);
}
for(intnative_t i=0; i<7; printf("%s\n", output_Buffer[i++]));
free(polynucleotide);
}
|
GB_binop__pow_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fp64)
// C=scalar+B GB (_bind1st__pow_fp64)
// C=scalar+B' GB (_bind1st_tran__pow_fp64)
// C=A+scalar GB (_bind2nd__pow_fp64)
// C=A'+scalar GB (_bind2nd_tran__pow_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = GB_pow (aij, bij)
// Type and operator plumbing consumed by the shared GraphBLAS templates that
// are #include'd below: together they specialize each generic kernel to
// cij = GB_pow (aij, bij) on double (fp64) data.
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for this operator: the accumulating dense C+=A+B kernel is only
// generated for the ops listed below ("(none)" in the name above means no
// kernel exists for pow), so this stub is compiled out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop itself lives in the
// shared template, specialized via the GB_* macros above to
// cij = GB_pow (aij, bij) on double data.
GrB_Info GB (_Cdense_ewise3_noaccum__pow_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel excluded at compile time (GxB_NO_* controls, see GB_DISABLE);
// caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C. B has been
// pre-sliced into B_ntasks tasks (B_ek_slicing) for B_nthreads threads.
GrB_Info GB (_Cdense_accumB__pow_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C. The scalar is
// passed type-erased (p_bwork) and unpacked to double here; the loop lives
// in the shared subassign template.
//
// Fix: the original body contained a second, unreachable
// "return (GrB_SUCCESS) ;" after the inner block, which already returns on
// every path. The dead statement is removed.
GrB_Info GB (_Cdense_accumb__pow_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no C=A*D colscale kernel is generated for pow (hence the
// "(none)" name); this placeholder is compiled out.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no C=D*B rowscale kernel is generated for pow (hence the
// "(none)" name); this placeholder is compiled out.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B (numeric phase 2; see
// GB_PHASE_2_OF_2 above). The pattern/task decomposition is supplied by the
// caller (TaskList, C_to_*), and the numeric work is done by GB_add_template.c.
GrB_Info GB (_AaddB__pow_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hypersparse; the work is done by the shared emult meta-template.
GrB_Info GB (_AemultB_08__pow_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 1 for pow (see macro above), so the flipxy
// flag is honored here by selecting the GB_FLIPPED variant of the template.
GrB_Info GB (_AemultB_02__pow_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; M has been pre-sliced (M_ek_slicing) for M_nthreads.
GrB_Info GB (_AemultB_04__pow_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held
// in bitmap form; the shared bitmap emult template does the work.
GrB_Info GB (_AemultB_bitmap__pow_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = GB_pow (x, Bx [k]) for every entry present in B: apply the
// binary op with its first argument bound to the scalar x.
GrB_Info GB (_bind1st__pow_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased arrays, plus the bound scalar
double *Cx = (double *) Cx_output ;
double *Bx = (double *) Bx_input ;
double x = (*((double *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only positions present in the bitmap Bb carry an entry
if (GBB (Bb, k))
{
double bkj = GBX (Bx, k, false) ;
Cx [k] = GB_pow (x, bkj) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = GB_pow (Ax [k], y) for every entry present in A: apply the
// binary op with its second argument bound to the scalar y.
GrB_Info GB (_bind2nd__pow_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased arrays, plus the bound scalar
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only positions present in the bitmap Ab carry an entry
if (GBB (Ab, k))
{
double akj = GBX (Ax, k, false) ;
Cx [k] = GB_pow (akj, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_unop_transpose.c invokes GB_CAST_OP for each entry it relocates.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow (x, aij) ; \
}
// C = op (x, A'): transpose A while applying the op with its first
// argument bound to the scalar x.
GrB_Info GB (_bind1st_tran__pow_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this kernel
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_unop_transpose.c invokes GB_CAST_OP for each entry it relocates.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow (aij, y) ; \
}
// C = op (A', y): transpose A while applying the op with its second
// argument bound to the scalar y.
GrB_Info GB (_bind2nd_tran__pow_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2019. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <string.h>
#include <tools/perf/lib/libperf_int.h>
#include <unistd.h>
#if _OPENMP
#include <omp.h>
#endif /* _OPENMP */
/* Select the 32- or 64-bit atomic-op flag for message size _size, storing
 * UCS_BIT(_op) into *_op32 or *_op64. On an unsupported size it logs and
 * expands a 'return' statement, so the CALLING function returns _status. */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
_status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
if (_status != UCS_OK) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support atomic %s for " \
"message size %zu bytes", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[_op], (_size)); \
return _status; \
}
/* Verify that all _required atomic capability bits are set in _attr; if
 * not, optionally report the first missing one (verbose mode only) and
 * make the CALLING function return UCS_ERR_UNSUPPORTED. */
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
if (!ucs_test_all_flags(_attr, _required)) { \
if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support required " \
#_size"-bit atomic: %s", UCT_PERF_TEST_PARAMS_ARG(_params), \
(_msg)[ucs_ffs64(~(_attr) & (_required))]); \
} \
return UCS_ERR_UNSUPPORTED; \
}
/* Per-endpoint information exchanged between peers during test setup.
 * The anonymous union carries API-specific address lengths (UCT or UCP);
 * rkey_size and recv_buffer are common to both. */
typedef struct {
union {
struct {
size_t dev_addr_len; /* length of the packed device address */
size_t iface_addr_len; /* length of the packed iface address */
size_t ep_addr_len; /* length of the packed ep address */
} uct;
struct {
size_t worker_addr_len;
size_t total_wireup_len;
} ucp;
};
size_t rkey_size; /* size of the packed remote key, 0 if none */
unsigned long recv_buffer; /* remote receive buffer address */
} ucx_perf_ep_info_t;
/* Allocator table indexed by memory type (see ucx_perf_test_init) */
const ucx_perf_allocator_t* ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_LAST];
/* Human-readable names for UCT iface capability flags, indexed by bit
 * position; used when reporting an unsupported operation */
static const char *perf_iface_ops[] = {
[ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)] = "am short",
[ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)] = "am bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)] = "am zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)] = "put short",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)] = "put bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)] = "put zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)] = "get short",
[ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)] = "get bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)] = "get zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
[ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)] = "connect to ep",
[ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)] = "full reliability",
[ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)] = "sync callback",
[ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)] = "async callback",
[ucs_ilog2(UCT_IFACE_FLAG_PENDING)] = "pending",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)] = "tag eager short",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)] = "tag eager bcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)] = "tag eager zcopy",
[ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)] = "tag rndv zcopy"
};
/* Names of non-fetching atomic ops, indexed by UCT_ATOMIC_OP_* */
static const char *perf_atomic_op[] = {
[UCT_ATOMIC_OP_ADD] = "add",
[UCT_ATOMIC_OP_AND] = "and",
[UCT_ATOMIC_OP_OR] = "or" ,
[UCT_ATOMIC_OP_XOR] = "xor"
};
/* Names of fetching atomic ops, indexed by UCT_ATOMIC_OP_* */
static const char *perf_atomic_fop[] = {
[UCT_ATOMIC_OP_ADD] = "fetch-add",
[UCT_ATOMIC_OP_AND] = "fetch-and",
[UCT_ATOMIC_OP_OR] = "fetch-or",
[UCT_ATOMIC_OP_XOR] = "fetch-xor",
[UCT_ATOMIC_OP_SWAP] = "swap",
[UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
/* Return the median of arr[0..n-1] using Hoare's Quickselect.
 * NOTE: partially reorders arr[] in place while partitioning.
 * Assumes n >= 1. */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
low = 0 ; high = n-1 ; median = (low + high) / 2;
for (;;) {
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1) { /* Two elements only */
if (arr[low] > arr[high])
ELEM_SWAP(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) / 2;
if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
ELEM_SWAP(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;) {
do ll++; while (arr[low] > arr[ll]) ;
do hh--; while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
ELEM_SWAP(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
ELEM_SWAP(arr[low], arr[hh]) ;
/* Re-set active partition: keep only the side containing the median */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
/* Allocate 'length' bytes of host memory through the UCT interface,
 * filling the caller-provided *alloc_mem descriptor.
 *
 * Fix: on failure the original called ucs_free(alloc_mem). alloc_mem is
 * owned by the caller — uct_perf_test_alloc_mem() passes the address of a
 * descriptor embedded in the perf context (&perf->uct.send_mem), which was
 * never heap-allocated — so freeing it here corrupts the heap. The
 * descriptor must simply be left untouched on error. */
static ucs_status_t
uct_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
unsigned flags, uct_allocated_memory_t *alloc_mem)
{
ucs_status_t status;
status = uct_iface_mem_alloc(perf->uct.iface, length,
flags, "perftest", alloc_mem);
if (status != UCS_OK) {
ucs_error("failed to allocate memory: %s", ucs_status_string(status));
return status;
}
ucs_assert(alloc_mem->md == perf->uct.md);
return UCS_OK;
}
/* Release memory previously obtained via uct_perf_test_alloc_host().
 * The perf argument is unused here but keeps the allocator vtable
 * signature uniform. */
static void uct_perf_test_free_host(const ucx_perf_context_t *perf,
uct_allocated_memory_t *alloc_mem)
{
uct_iface_mem_free(alloc_mem);
}
/* Host-to-host copy helper for the allocator vtable: both buffers must be
 * in host memory; anything else is logged as an error and the copy is
 * skipped. */
static void ucx_perf_test_memcpy_host(void *dst, ucs_memory_type_t dst_mem_type,
const void *src, ucs_memory_type_t src_mem_type,
size_t count)
{
if ((dst_mem_type == UCS_MEMORY_TYPE_HOST) &&
(src_mem_type == UCS_MEMORY_TYPE_HOST)) {
memcpy(dst, src, count);
} else {
ucs_error("wrong memory type passed src - %d, dst - %d",
src_mem_type, dst_mem_type);
}
}
/* Allocate the UCT test buffers: one send region and one receive region of
 * buffer_size bytes per thread, plus the IOV descriptor array. On any
 * failure, everything allocated earlier in this function is released
 * (goto-cleanup chain). */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
ucx_perf_params_t *params = &perf->params;
ucs_status_t status;
unsigned flags;
size_t buffer_size;
/* with ZCOPY and a nonzero stride, each of the msg_size_cnt IOV entries
 * occupies iov_stride bytes */
if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
buffer_size = params->msg_size_cnt * params->iov_stride;
} else {
buffer_size = ucx_perf_get_message_size(params);
}
/* TODO use params->alignment */
flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
UCT_MD_MEM_FLAG_NONBLOCK : 0;
flags |= UCT_MD_MEM_ACCESS_ALL;
/* Allocate send buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.send_mem);
if (status != UCS_OK) {
goto err;
}
perf->send_buffer = perf->uct.send_mem.address;
/* Allocate receive buffer memory */
status = perf->allocator->uct_alloc(perf, buffer_size * params->thread_count,
flags, &perf->uct.recv_mem);
if (status != UCS_OK) {
goto err_free_send;
}
perf->recv_buffer = perf->uct.recv_mem.address;
/* Allocate IOV datatype memory: msg_size_cnt entries per thread */
perf->params.msg_size_cnt = params->msg_size_cnt;
perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
perf->params.msg_size_cnt *
params->thread_count);
if (NULL == perf->uct.iov) {
status = UCS_ERR_NO_MEMORY;
ucs_error("Failed allocate send IOV(%lu) buffer: %s",
perf->params.msg_size_cnt, ucs_status_string(status));
goto err_free_recv;
}
ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
perf->send_buffer, perf->recv_buffer);
return UCS_OK;
err_free_recv:
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
err_free_send:
perf->allocator->uct_free(perf, &perf->uct.send_mem);
err:
return status;
}
/* Release the send/receive buffers and the IOV array allocated by
 * uct_perf_test_alloc_mem() */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
perf->allocator->uct_free(perf, &perf->uct.send_mem);
perf->allocator->uct_free(perf, &perf->uct.recv_mem);
free(perf->uct.iov);
}
/* Reset the test clocks: capture the coarse tick counter (used for the
 * end-of-test deadline) and the accurate wall clock (used for reported
 * results), and seed the "previous report" snapshots with them. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
ucs_time_t now = ucs_get_time();
perf->start_time_acc = ucs_get_accurate_time();
if (perf->params.max_time == 0.0) {
perf->end_time = UINT64_MAX; /* no time limit */
} else {
perf->end_time = ucs_time_from_sec(perf->params.max_time) + now;
}
perf->prev_time = now;
perf->prev.time = now;
perf->prev.time_acc = perf->start_time_acc;
perf->current.time_acc = perf->start_time_acc;
}
/* Reset every counter and timer that a warm-up run may have modified, so a
 * measured run starts from a clean state. */
static void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned idx;
if (perf->params.max_iter == 0) {
perf->max_iter = UINT64_MAX; /* iterate without limit */
} else {
perf->max_iter = perf->params.max_iter;
}
perf->report_interval = ucs_time_from_sec(perf->params.report_interval);
/* clear the running and last-report counters */
perf->current.time = 0;
perf->current.msgs = 0;
perf->current.bytes = 0;
perf->current.iters = 0;
perf->prev.msgs = 0;
perf->prev.bytes = 0;
perf->prev.iters = 0;
/* empty the latency sampling queue */
perf->timing_queue_head = 0;
for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
perf->timing_queue[idx] = 0;
}
ucx_perf_test_start_clock(perf);
}
/* Copy the test parameters into the context, pick the memory allocator for
 * this process's role, and reset the run state. */
static void ucx_perf_test_init(ucx_perf_context_t *perf,
const ucx_perf_params_t *params)
{
unsigned group_index;
perf->params = *params;
group_index = rte_call(perf, group_index);
/* group index 0 allocates from the send-side memory type; every other
 * group member allocates from the receive-side memory type */
perf->allocator = (0 == group_index) ?
ucx_perf_mem_type_allocators[params->send_mem_type] :
ucx_perf_mem_type_allocators[params->recv_mem_type];
ucx_perf_test_prepare_new_run(perf, params);
}
/* Compute the reported metrics (latency, bandwidth, message rate) from the
 * context's counters. "moment" averages cover the interval since the last
 * report; "total" averages cover the whole run. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
ucs_time_t median;
double factor;
/* a ping-pong iteration is a round trip: halve latencies and double the
 * byte/message rates accordingly */
factor = (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ? 2.0 : 1.0;
result->iters = perf->current.iters;
result->bytes = perf->current.bytes;
result->elapsed_time = perf->current.time_acc - perf->start_time_acc;
/* Latency: typical is the median of the sampling queue */
median = __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE);
result->latency.typical = ucs_time_to_sec(median) / factor;
result->latency.moment_average =
(perf->current.time_acc - perf->prev.time_acc)
/ (perf->current.iters - perf->prev.iters)
/ factor;
result->latency.total_average =
(perf->current.time_acc - perf->start_time_acc)
/ perf->current.iters
/ factor;
/* Bandwidth */
result->bandwidth.typical = 0.0; /* undefined */
result->bandwidth.moment_average =
(perf->current.bytes - perf->prev.bytes) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->bandwidth.total_average =
perf->current.bytes /
(perf->current.time_acc - perf->start_time_acc) * factor;
/* Packet rate */
result->msgrate.typical = 0.0; /* undefined */
result->msgrate.moment_average =
(perf->current.msgs - perf->prev.msgs) /
(perf->current.time_acc - perf->prev.time_acc) * factor;
result->msgrate.total_average =
perf->current.msgs /
(perf->current.time_acc - perf->start_time_acc) * factor;
}
/* Validate generic (API-independent) test parameters: minimum message
 * size, outstanding-operation window, and IOV stride constraints.
 * Errors are reported only in verbose mode but always fail the check. */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
size_t it;
/* check if zero-size messages are requested and supported */
if ((/* they are not supported by: */
/* - UCT tests, except UCT AM Short/Bcopy */
(params->api == UCX_PERF_API_UCT) ||
(/* - UCP RMA and AMO tests */
(params->api == UCX_PERF_API_UCP) &&
(params->command != UCX_PERF_CMD_AM) &&
(params->command != UCX_PERF_CMD_TAG) &&
(params->command != UCX_PERF_CMD_TAG_SYNC) &&
(params->command != UCX_PERF_CMD_STREAM))) &&
ucx_perf_get_message_size(params) < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size too small, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
/* UCP RMA/AMO commands currently require host memory on both sides */
if ((params->api == UCX_PERF_API_UCP) &&
((params->send_mem_type != UCS_MEMORY_TYPE_HOST) ||
(params->recv_mem_type != UCS_MEMORY_TYPE_HOST)) &&
((params->command == UCX_PERF_CMD_PUT) ||
(params->command == UCX_PERF_CMD_GET) ||
(params->command == UCX_PERF_CMD_ADD) ||
(params->command == UCX_PERF_CMD_FADD) ||
(params->command == UCX_PERF_CMD_SWAP) ||
(params->command == UCX_PERF_CMD_CSWAP))) {
/* TODO: remove when support for non-HOST memory types will be added */
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("UCP doesn't support RMA/AMO for \"%s\"<->\"%s\" memory types",
ucs_memory_type_names[params->send_mem_type],
ucs_memory_type_names[params->recv_mem_type]);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->max_outstanding < 1) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("max_outstanding, need to be at least 1");
}
return UCS_ERR_INVALID_PARAM;
}
/* check if particular message size fit into stride size */
if (params->iov_stride) {
for (it = 0; it < params->msg_size_cnt; ++it) {
if (params->msg_size_list[it] > params->iov_stride) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Buffer size %lu bigger than stride %lu",
params->msg_size_list[it], params->iov_stride);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
return UCS_OK;
}
/* Flush one endpoint, blocking: progress the worker until the flush
 * completes. comp.count starts at 2 and the loop exits at count == 1:
 * either uct_ep_flush() returns UCS_OK and we decrement it ourselves, or
 * it returns UCS_INPROGRESS and completion is signalled through comp
 * (presumably the transport decrements comp.count — comp.func is NULL, so
 * no callback body runs). UCS_ERR_NO_RESOURCE means "retry later", so the
 * flush is re-issued on the next loop iteration. */
void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index)
{
uct_ep_h ep = perf->uct.peers[peer_index].ep;
uct_completion_t comp;
ucs_status_t status;
int started;
started = 0;
comp.func = NULL;
comp.count = 2;
do {
if (!started) {
status = uct_ep_flush(ep, 0, &comp);
if (status == UCS_OK) {
--comp.count;
} else if (status == UCS_INPROGRESS) {
started = 1;
} else if (status != UCS_ERR_NO_RESOURCE) {
ucs_error("uct_ep_flush() failed: %s", ucs_status_string(status));
return;
}
}
uct_worker_progress(perf->uct.worker);
} while (comp.count > 1);
}
/* Flush the whole interface, blocking: keep progressing the worker until
 * uct_iface_flush() stops returning UCS_INPROGRESS, then report any final
 * failure. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
ucs_status_t status;
for (;;) {
status = uct_iface_flush(perf->uct.iface, 0, NULL);
uct_worker_progress(perf->uct.worker);
if (status != UCS_INPROGRESS) {
break;
}
}
if (status != UCS_OK) {
ucs_error("uct_iface_flush() failed: %s", ucs_status_string(status));
}
}
/* Map the test's data layout to the matching capability flag; unknown
 * layouts map to 0 (no flag required). */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
uint64_t bcopy_f, uint64_t zcopy_f)
{
switch (layout) {
case UCT_PERF_DATA_LAYOUT_SHORT:
return short_f;
case UCT_PERF_DATA_LAYOUT_BCOPY:
return bcopy_f;
case UCT_PERF_DATA_LAYOUT_ZCOPY:
return zcopy_f;
default:
return 0;
}
}
/* Record atomic op 'op' in the 32- or 64-bit flag word according to the
 * operand size; sizes other than 4 or 8 bytes are unsupported. */
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
uint64_t *op64, uint64_t op)
{
switch (size) {
case sizeof(uint32_t):
*op32 = UCS_BIT(op);
return UCS_OK;
case sizeof(uint64_t):
*op64 = UCS_BIT(op);
return UCS_OK;
default:
return UCS_ERR_UNSUPPORTED;
}
}
/* Map the test's data layout to the corresponding size limit (used for
 * both max and min limits by the callers); unknown layouts map to 0.
 *
 * Fix: zcopy_m was declared uint64_t while its siblings and the return
 * type are size_t (callers pass size_t iface-attribute fields). Use size_t
 * throughout for consistency with __get_flag()'s uniform typing and to
 * avoid a silent narrowing at the return on 32-bit builds. */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
size_t bcopy_m, size_t zcopy_m)
{
return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
(layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
(layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
0;
}
/* Verify the memory domain can access or register memory of the given
 * type; fail with UCS_ERR_INVALID_PARAM otherwise.
 *
 * Fix: the error return was inside the VERBOSE-flag check, so without the
 * verbose flag an unsupported memory type fell through to UCS_OK and the
 * test proceeded with memory the MD cannot handle. The status must be
 * returned regardless of verbosity; only the message is conditional. */
static ucs_status_t uct_perf_test_check_md_support(ucx_perf_params_t *params,
ucs_memory_type_t mem_type,
uct_md_attr_t *md_attr)
{
if (!(md_attr->cap.access_mem_types & UCS_BIT(mem_type)) &&
!(md_attr->cap.reg_mem_types & UCS_BIT(mem_type))) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Unsupported memory type %s by "UCT_PERF_TEST_PARAMS_FMT,
ucs_memory_type_names[mem_type],
UCT_PERF_TEST_PARAMS_ARG(params));
}
return UCS_ERR_INVALID_PARAM;
}
return UCS_OK;
}
/* Validate the requested UCT test against the interface and memory-domain
 * capabilities: required iface flags or atomic bits for the command,
 * message-size limits for the chosen data layout, AM-specific constraints,
 * IOV limits for ZCOPY, and memory-type support in the MD. */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
uct_iface_h iface, uct_md_h md)
{
uint64_t required_flags = 0;
uint64_t atomic_op32 = 0;
uint64_t atomic_op64 = 0;
uint64_t atomic_fop32 = 0;
uint64_t atomic_fop64 = 0;
uct_md_attr_t md_attr;
uct_iface_attr_t attr;
ucs_status_t status;
size_t min_size, max_size, max_iov, message_size;
status = uct_md_query(md, &md_attr);
if (status != UCS_OK) {
ucs_error("uct_md_query(%s) failed: %s",
params->uct.md_name, ucs_status_string(status));
return status;
}
status = uct_iface_query(iface, &attr);
if (status != UCS_OK) {
ucs_error("uct_iface_query("UCT_PERF_TEST_PARAMS_FMT") failed: %s",
UCT_PERF_TEST_PARAMS_ARG(params),
ucs_status_string(status));
return status;
}
min_size = 0;
max_iov = 1;
message_size = ucx_perf_get_message_size(params);
/* derive the required capability flags and size limits per command;
 * the ATOMIC_OP_CONFIG macros may return from this function directly */
switch (params->command) {
case UCX_PERF_CMD_AM:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
required_flags |= UCT_IFACE_FLAG_CB_SYNC;
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.am.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
max_iov = attr.cap.am.max_iov;
break;
case UCX_PERF_CMD_PUT:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.put.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
max_iov = attr.cap.put.max_iov;
break;
case UCX_PERF_CMD_GET:
required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
min_size = __get_max_size(params->uct.data_layout, 0, 0,
attr.cap.get.min_zcopy);
max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
max_iov = attr.cap.get.max_iov;
break;
case UCX_PERF_CMD_ADD:
ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
perf_atomic_op, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_FADD:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_SWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
case UCX_PERF_CMD_CSWAP:
ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
perf_atomic_fop, params, status);
max_size = 8;
break;
default:
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Invalid test command");
}
return UCS_ERR_INVALID_PARAM;
}
status = ucx_perf_test_check_params(params);
if (status != UCS_OK) {
return status;
}
/* check atomics first; these macros return on a missing capability */
ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);
/* check iface flags (atomic commands are validated above instead) */
if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
(!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error(UCT_PERF_TEST_PARAMS_FMT" does not support operation %s",
UCT_PERF_TEST_PARAMS_ARG(params),
perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
}
return UCS_ERR_UNSUPPORTED;
}
/* enforce the layout-specific message size window */
if (message_size < min_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is smaller than min supported (%zu)",
message_size, min_size);
}
return UCS_ERR_UNSUPPORTED;
}
if (message_size > max_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Message size (%zu) is larger than max supported (%zu)",
message_size, max_size);
}
return UCS_ERR_UNSUPPORTED;
}
/* AM-specific header-size and flow-control constraints */
if (params->command == UCX_PERF_CMD_AM) {
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
(params->am_hdr_size != sizeof(uint64_t)))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("Short AM header size must be 8 bytes");
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
(params->am_hdr_size > attr.cap.am.max_hdr))
{
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than max supported (%zu)",
params->am_hdr_size, attr.cap.am.max_hdr);
}
return UCS_ERR_UNSUPPORTED;
}
if (params->am_hdr_size > message_size) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%zu) is larger than message size (%zu)",
params->am_hdr_size, message_size);
}
return UCS_ERR_INVALID_PARAM;
}
if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM flow-control window (%d) too large (should be <= %d)",
params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
}
return UCS_ERR_INVALID_PARAM;
}
if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
(params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
{
ucs_warn("Running active-message test with on-sided progress");
}
}
/* ZCOPY: IOV count must fit the transport's per-operation limit */
if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
if (params->msg_size_cnt > max_iov) {
if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
!params->msg_size_cnt) {
ucs_error("Wrong number of IOV entries. Requested is %lu, "
"should be in the range 1...%lu", params->msg_size_cnt,
max_iov);
}
return UCS_ERR_UNSUPPORTED;
}
/* if msg_size_cnt == 1 the message size checked above */
if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
if (params->am_hdr_size > params->msg_size_list[0]) {
if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
ucs_error("AM header size (%lu) larger than the first IOV "
"message size (%lu)", params->am_hdr_size,
params->msg_size_list[0]);
}
return UCS_ERR_INVALID_PARAM;
}
}
}
/* finally, the MD must support both memory types */
status = uct_perf_test_check_md_support(params, params->send_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
status = uct_perf_test_check_md_support(params, params->recv_mem_type, &md_attr);
if (status != UCS_OK) {
return status;
}
return UCS_OK;
}
/*
 * Exchange transport addresses with every group member over the RTE and
 * create a connected endpoint to each remote peer.
 *
 * The local device/iface/ep addresses (and the packed rkey, when the MD
 * supports allocation/registration) are laid out in one scratch buffer and
 * posted to the RTE; the same buffer is then reused to receive each peer's
 * packed data.
 *
 * Returns UCS_OK on success; on error, releases any endpoints and rkeys
 * that were already created.
 */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = ADDR_BUF_SIZE;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    uct_ep_params_t ep_params;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* Pack an rkey only if the MD can allocate or register memory */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size      = md_attr.rkey_packed_size;
    } else {
        info.rkey_size      = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Scratch buffer layout: [rkey][dev_addr][iface_addr][ep_addr] */
    rkey_buffer             = buffer;
    dev_addr                = UCS_PTR_BYTE_OFFSET(rkey_buffer, info.rkey_size);
    iface_addr              = UCS_PTR_BYTE_OFFSET(dev_addr, info.uct.dev_addr_len);
    ep_addr                 = UCS_PTR_BYTE_OFFSET(iface_addr, info.uct.iface_addr_len);
    ucs_assert_always(UCS_PTR_BYTE_OFFSET(ep_addr, info.uct.ep_addr_len) <=
                      UCS_PTR_BYTE_OFFSET(buffer, buffer_size));

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        memset(rkey_buffer, 0, info.rkey_size);
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* BUG FIX: previously jumped to cleanup without setting an error
         * status, so allocation failure could return a stale UCS_OK */
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    /* Mark all rkeys invalid up-front so the error path below does not try
     * to release rkeys of peers that were never set up (calloc leaves
     * rkey.rkey == 0, which is not UCT_INVALID_RKEY) */
    for (i = 0; i < group_size; ++i) {
        perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        perf->uct.peers[i].rkey.handle = NULL;
    }

    ep_params.field_mask = UCT_EP_PARAM_FIELD_IFACE;
    ep_params.iface      = perf->uct.iface;
    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        /* p2p transports: create all local endpoints first and pack the
         * ep address for the remote side to connect to */
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }

            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
        ep_params.field_mask |= UCT_EP_PARAM_FIELD_DEV_ADDR |
                                UCT_EP_PARAM_FIELD_IFACE_ADDR;
    }

    /* Post local info + packed addresses to the RTE */
    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        /* Receive the peer's packed data and unpack it with the peer's own
         * (possibly different) address lengths */
        rte_call(perf, recv, i, buffer, buffer_size, req);

        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = UCS_PTR_BYTE_OFFSET(rkey_buffer, remote_info->rkey_size);
        iface_addr  = UCS_PTR_BYTE_OFFSET(dev_addr, remote_info->uct.dev_addr_len);
        ep_addr     = UCS_PTR_BYTE_OFFSET(iface_addr, remote_info->uct.iface_addr_len);
        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(perf->uct.cmpt, rkey_buffer,
                                     &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            ep_params.dev_addr   = dev_addr;
            ep_params.iface_addr = iface_addr;
            status = uct_ep_create(&ep_params, &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }

    /* Make sure all wireup traffic completed before declaring success */
    uct_perf_iface_flush_b(perf);

    free(buffer);

    uct_perf_barrier(perf);
    return UCS_OK;

err_destroy_eps:
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/*
 * Tear down all UCT endpoints and remote keys created by
 * uct_perf_test_setup_endpoints(), after a final RTE barrier.
 */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned num_peers, my_index, peer;

    uct_perf_barrier(perf);

    /* Stop receiving active messages before destroying endpoints */
    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);

    num_peers = rte_call(perf, group_size);
    my_index  = rte_call(perf, group_index);

    for (peer = 0; peer < num_peers; ++peer) {
        if (peer == my_index) {
            continue; /* no endpoint to ourselves */
        }
        if (perf->uct.peers[peer].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(perf->uct.cmpt, &perf->uct.peers[peer].rkey);
        }
        if (perf->uct.peers[peer].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[peer].ep);
        }
    }

    free(perf->uct.peers);
}
/*
 * Translate the perf test command into the UCP feature bits required to run
 * it, then validate the test parameters.
 * Returns UCS_ERR_INVALID_PARAM for unknown commands or unsupported atomic
 * sizes (atomics must be exactly 32 or 64 bits).
 */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                              ucp_params_t *ucp_params)
{
    size_t msg_size = ucx_perf_get_message_size(params);
    ucs_status_t status;

    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        ucp_params->features |= UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        /* Atomic width is determined by the message size */
        if (msg_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
            break;
        }
        if (msg_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
            break;
        }
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Atomic size should be either 32 or 64 bit");
        }
        return UCS_ERR_INVALID_PARAM;
    case UCX_PERF_CMD_TAG:
    case UCX_PERF_CMD_TAG_SYNC:
        ucp_params->features |= UCP_FEATURE_TAG;
        break;
    case UCX_PERF_CMD_STREAM:
        ucp_params->features |= UCP_FEATURE_STREAM;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->flags & UCX_PERF_TEST_FLAG_WAKEUP) {
        ucp_params->features |= UCP_FEATURE_WAKEUP;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/*
 * Allocate the per-thread IOV descriptor array when the IOV datatype is in
 * use; a no-op (leaving *iov_p untouched) for contiguous datatypes.
 *
 * Returns UCS_ERR_NO_MEMORY if the allocation fails or if
 * iovcnt * thread_count * sizeof(ucp_dt_iov_t) would overflow size_t.
 */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    if (UCP_PERF_DATATYPE_IOV == datatype) {
        /* BUG FIX: guard the element-count product against size_t overflow;
         * calloc() then checks the final count * sizeof multiplication */
        if ((thread_count != 0) && (iovcnt > SIZE_MAX / thread_count)) {
            ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
            return UCS_ERR_NO_MEMORY;
        }
        iov = calloc(iovcnt * thread_count, sizeof(*iov));
        if (NULL == iov) {
            ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
            return UCS_ERR_NO_MEMORY;
        }
        *iov_p = iov;
    }
    return UCS_OK;
}
/*
 * Allocate 'length' bytes of host memory through ucp_mem_map() and return
 * the mapped address via *address_p and the handle via *memh.
 * 'non_blk_flag' is OR-ed into the mapping flags when the test requests
 * non-blocking mapping.
 */
static ucs_status_t
ucp_perf_test_alloc_host(const ucx_perf_context_t *perf, size_t length,
                         void **address_p, ucp_mem_h *memh, int non_blk_flag)
{
    ucp_mem_map_params_t map_params;
    ucp_mem_attr_t attr;
    ucs_status_t status;

    map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                            UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                            UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    map_params.address    = *address_p;
    map_params.length     = length;
    map_params.flags      = UCP_MEM_MAP_ALLOCATE;
    if (perf->params.flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) {
        map_params.flags |= non_blk_flag;
    }

    status = ucp_mem_map(perf->ucp.context, &map_params, memh);
    if (status != UCS_OK) {
        return status;
    }

    /* Query the actual address chosen by the mapping */
    attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(*memh, &attr);
    if (status != UCS_OK) {
        return status;
    }

    *address_p = attr.address;
    return UCS_OK;
}
/*
 * Release host memory previously mapped by ucp_perf_test_alloc_host().
 * 'address' is unused; the handle alone identifies the mapping.
 */
static void ucp_perf_test_free_host(const ucx_perf_context_t *perf,
                                    void *address, ucp_mem_h memh)
{
    ucs_status_t unmap_status = ucp_mem_unmap(perf->ucp.context, memh);

    if (unmap_status != UCS_OK) {
        ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(unmap_status));
    }
}
/*
 * Allocate the send and receive buffers (one region per thread) and, when
 * IOV datatypes are used, the IOV descriptor arrays.
 * Returns UCS_OK, or the status of the failing allocation step after
 * releasing everything allocated so far.
 */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    ucs_status_t status;
    size_t buffer_size;

    /* With a stride, the buffer must cover all IOV entries at that stride;
     * otherwise it covers the whole message */
    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->send_buffer, &perf->ucp.send_memh,
                                        UCP_MEM_MAP_NONBLOCK);
    if (status != UCS_OK) {
        goto err;
    }

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;
    status = perf->allocator->ucp_alloc(perf, buffer_size * params->thread_count,
                                        &perf->recv_buffer, &perf->ucp.recv_memh,
                                        0);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    /* Allocate IOV datatype memory */
    perf->ucp.send_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
err:
    /* BUG FIX: propagate the real failure status instead of always
     * reporting UCS_ERR_NO_MEMORY */
    return status;
}
/* Release everything allocated by ucp_perf_test_alloc_mem(): the IOV
 * descriptor arrays (free(NULL) is a no-op when IOV was not used) and the
 * send/receive buffers via the allocator's ucp_free hook. */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    perf->allocator->ucp_free(perf, perf->recv_buffer, perf->ucp.recv_memh);
    perf->allocator->ucp_free(perf, perf->send_buffer, perf->ucp.send_memh);
}
/*
 * Destroy the per-thread rkeys and endpoints. Endpoints are closed with
 * flush mode, progressing the owning worker until the close request
 * completes. NULL entries (never created, or already cleaned) are skipped.
 */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf)
{
    unsigned idx, nthreads = perf->params.thread_count;
    ucs_status_ptr_t *close_req;
    ucs_status_t status;

    for (idx = 0; idx < nthreads; ++idx) {
        if (perf->ucp.tctx[idx].perf.ucp.rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.tctx[idx].perf.ucp.rkey);
        }
        if (perf->ucp.tctx[idx].perf.ucp.ep == NULL) {
            continue;
        }

        close_req = ucp_ep_close_nb(perf->ucp.tctx[idx].perf.ucp.ep,
                                    UCP_EP_CLOSE_MODE_FLUSH);
        if (UCS_PTR_IS_PTR(close_req)) {
            /* Drive progress until the close request finishes */
            do {
                ucp_worker_progress(perf->ucp.tctx[idx].perf.ucp.worker);
                status = ucp_request_check_status(close_req);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(close_req);
        } else if (UCS_PTR_STATUS(close_req) != UCS_OK) {
            ucs_warn("failed to close ep %p on thread %d: %s\n",
                     perf->ucp.tctx[idx].perf.ucp.ep, idx,
                     ucs_status_string(UCS_PTR_STATUS(close_req)));
        }
    }
}
/*
 * Broadcast this process' status over the RTE and collect the statuses of
 * all group members. Returns the last non-OK status seen (our own included),
 * or UCS_OK if everyone succeeded — so every rank agrees on failure.
 */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned peer, num_peers = rte_call(perf, group_size);
    ucs_status_t result = status;
    void *request = NULL;
    struct iovec iov;

    iov.iov_base = &status;
    iov.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &iov, 1, &request);
    rte_call(perf, exchange_vec, request);

    for (peer = 0; peer < num_peers; ++peer) {
        rte_call(perf, recv, peer, &status, sizeof(status), request);
        if (status != UCS_OK) {
            result = status;
        }
    }
    return result;
}
/* Receive the remote peer's per-thread wireup data (ep_info, worker address,
 * packed rkey) over the RTE and, for each local thread, create an endpoint
 * to the corresponding remote worker and unpack its rkey.
 * Only a group size of exactly 2 is supported. On error all endpoints and
 * rkeys created so far are destroyed. */
static ucs_status_t ucp_perf_test_receive_remote_data(ucx_perf_context_t *perf)
{
    unsigned thread_count = perf->params.thread_count;
    void *rkey_buffer = NULL;
    void *req = NULL;
    unsigned group_size, group_index, i;
    ucx_perf_ep_info_t *remote_info;
    ucp_ep_params_t ep_params;
    ucp_address_t *address;
    ucs_status_t status;
    size_t buffer_size;
    void *buffer;
    group_size = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);
    if (group_size != 2) {
        ucs_error("perftest requires group size to be exactly 2 "
                  "(actual group size: %u)", group_size);
        return UCS_ERR_UNSUPPORTED;
    }
    /* One ADDR_BUF_SIZE-sized slot per remote thread */
    buffer_size = ADDR_BUF_SIZE * thread_count;
    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }
    /* Initialize all endpoints and rkeys to NULL to handle error flow */
    for (i = 0; i < thread_count; i++) {
        perf->ucp.tctx[i].perf.ucp.ep   = NULL;
        perf->ucp.tctx[i].perf.ucp.rkey = NULL;
    }
    /* receive the data from the remote peer, extract the address from it
     * (along with additional wireup info) and create an endpoint to the peer */
    rte_call(perf, recv, 1 - group_index, buffer, buffer_size, req);
    remote_info = buffer;
    for (i = 0; i < thread_count; i++) {
        /* Each record is laid out as [ep_info][worker address][rkey];
         * 'total_wireup_len' advances to the next thread's record */
        address     = (ucp_address_t*)(remote_info + 1);
        rkey_buffer = UCS_PTR_BYTE_OFFSET(address,
                                          remote_info->ucp.worker_addr_len);
        perf->ucp.tctx[i].perf.ucp.remote_addr = remote_info->recv_buffer;
        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;
        status = ucp_ep_create(perf->ucp.tctx[i].perf.ucp.worker, &ep_params,
                               &perf->ucp.tctx[i].perf.ucp.ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_eps_buffer;
        }
        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.tctx[i].perf.ucp.ep, rkey_buffer,
                                        &perf->ucp.tctx[i].perf.ucp.rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_eps_buffer;
            }
        } else {
            /* No RMA/AMO features: the peer sent no rkey */
            perf->ucp.tctx[i].perf.ucp.rkey = NULL;
        }
        remote_info = UCS_PTR_BYTE_OFFSET(remote_info,
                                          remote_info->ucp.total_wireup_len);
    }
    free(buffer);
    return UCS_OK;
err_free_eps_buffer:
    ucp_perf_test_destroy_eps(perf);
    free(buffer);
err:
    return status;
}
/*
 * Pack each thread's wireup data (ep_info, worker address, packed rkey) into
 * an iovec and post it to the remote peer over the RTE.
 *
 * The rkey is packed once (only when RMA/AMO features are enabled) and
 * shared by all threads. On error, all workers created so far are destroyed
 * and their slots NULLed so later cleanup does not double-destroy them.
 */
static ucs_status_t ucp_perf_test_send_local_data(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    unsigned i, j, thread_count = perf->params.thread_count;
    size_t address_length = 0;
    void *rkey_buffer = NULL;
    void *req = NULL;
    ucx_perf_ep_info_t *info;
    ucp_address_t *address;
    ucs_status_t status;
    struct iovec *vec;
    size_t rkey_size;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            goto err;
        }
    } else {
        rkey_size = 0;
    }

    /* each thread has an iovec with 3 entries to send to the remote peer:
     * ep_info, worker_address and rkey buffer */
    vec = calloc(3 * thread_count, sizeof(struct iovec));
    if (vec == NULL) {
        ucs_error("failed to allocate iovec");
        status = UCS_ERR_NO_MEMORY;
        goto err_rkey_release;
    }

    /* get the worker address created for every thread and send it to the remote
     * peer */
    for (i = 0; i < thread_count; i++) {
        status = ucp_worker_get_address(perf->ucp.tctx[i].perf.ucp.worker,
                                        &address, &address_length);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_worker_get_address() failed: %s",
                          ucs_status_string(status));
            }
            goto err_free_workers_vec;
        }

        vec[i * 3].iov_base = malloc(sizeof(*info));
        if (vec[i * 3].iov_base == NULL) {
            ucs_error("failed to allocate vec entry for info");
            status = UCS_ERR_NO_MEMORY;
            /* release the address we just obtained, then drop this worker;
             * NULL the slot so later cleanup skips it */
            ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker, address);
            ucp_worker_destroy(perf->ucp.tctx[i].perf.ucp.worker);
            perf->ucp.tctx[i].perf.ucp.worker = NULL;
            goto err_free_workers_vec;
        }

        info                       = vec[i * 3].iov_base;
        info->ucp.worker_addr_len  = address_length;
        info->ucp.total_wireup_len = sizeof(*info) + address_length + rkey_size;
        info->rkey_size            = rkey_size;
        info->recv_buffer          = (uintptr_t)perf->ucp.tctx[i].perf.recv_buffer;

        vec[(i * 3) + 0].iov_len  = sizeof(*info);
        vec[(i * 3) + 1].iov_base = address;
        vec[(i * 3) + 1].iov_len  = address_length;
        vec[(i * 3) + 2].iov_base = rkey_buffer;
        vec[(i * 3) + 2].iov_len  = info->rkey_size;

        address_length = 0;
    }

    /* send to the remote peer */
    rte_call(perf, post_vec, vec, 3 * thread_count, &req);
    rte_call(perf, exchange_vec, req);

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        ucp_rkey_buffer_release(rkey_buffer);
    }

    for (i = 0; i < thread_count; i++) {
        free(vec[i * 3].iov_base);
        ucp_worker_release_address(perf->ucp.tctx[i].perf.ucp.worker,
                                   vec[(i * 3) + 1].iov_base);
    }
    free(vec);

    return UCS_OK;

err_free_workers_vec:
    /* BUG FIX: this loop previously destroyed tctx[i] (the same worker)
     * on every iteration instead of tctx[j], leaking the others; it also
     * leaked the per-thread info buffers and worker addresses */
    for (j = 0; j < i; j++) {
        free(vec[j * 3].iov_base);
        ucp_worker_release_address(perf->ucp.tctx[j].perf.ucp.worker,
                                   vec[(j * 3) + 1].iov_base);
        ucp_worker_destroy(perf->ucp.tctx[j].perf.ucp.worker);
        perf->ucp.tctx[j].perf.ucp.worker = NULL;
    }
    free(vec);
err_rkey_release:
    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        ucp_rkey_buffer_release(rkey_buffer);
    }
err:
    return status;
}
/*
 * Establish UCP endpoints between the two peers: publish the local per-thread
 * wireup data, receive and connect to the remote side, synchronize the
 * resulting status across all processes, and flush the workers to force
 * wireup completion.
 */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    ucs_status_t status;
    unsigned i;

    /* pack the local endpoints data and send to the remote peer */
    status = ucp_perf_test_send_local_data(perf, features);
    if (status != UCS_OK) {
        goto err;
    }

    /* receive remote peer's endpoints' data and connect to them */
    status = ucp_perf_test_receive_remote_data(perf);
    if (status != UCS_OK) {
        goto err;
    }

    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        goto err_destroy_eps;
    }

    /* force wireup completion */
    for (i = 0; i < perf->params.thread_count; i++) {
        status = ucp_worker_flush(perf->ucp.tctx[i].perf.ucp.worker);
        if (status != UCS_OK) {
            /* BUG FIX: message said "theread" and used %d for an unsigned */
            ucs_warn("ucp_worker_flush() failed on thread %u: %s",
                     i, ucs_status_string(status));
        }
    }

    return status;

err_destroy_eps:
    ucp_perf_test_destroy_eps(perf);
err:
    /* let the peer know we failed so both sides abort consistently */
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Tear down all UCP endpoints after a final barrier, so no peer destroys
 * its endpoints while the other side is still sending. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    ucp_perf_barrier(perf);
    ucp_perf_test_destroy_eps(perf);
}
/*
 * Destroy every per-thread UCP worker that was created; slots that are
 * still NULL (never created or already destroyed) are skipped.
 */
static void ucp_perf_test_destroy_workers(ucx_perf_context_t *perf)
{
    unsigned tid;

    for (tid = 0; tid < perf->params.thread_count; tid++) {
        if (perf->ucp.tctx[tid].perf.ucp.worker == NULL) {
            continue;
        }
        ucp_worker_destroy(perf->ucp.tctx[tid].perf.ucp.worker);
    }
}
/*
 * Configure the context for the warmup phase: suppress progress reports and
 * limit the iteration count to warmup_iter, capped at one tenth of the
 * measured iteration count.
 */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf,
                                const ucx_perf_params_t* params)
{
    /* no intermediate reports during warmup */
    perf->report_interval = ULONG_MAX;
    perf->max_iter        = ucs_min(params->warmup_iter,
                                    ucs_div_round_up(params->max_iter, 10));
}
/*
 * Find and open the memory domain which owns the transport/device pair
 * requested in perf->params.uct (tl_name/dev_name).
 *
 * On success stores the component and the opened MD in perf->uct and returns
 * UCS_OK; returns UCS_ERR_NO_DEVICE if no component exposes the requested
 * resource. perf->params.uct.md_name is updated as a side effect to the name
 * of the last MD examined.
 */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_component_h *uct_components;
    uct_component_attr_t component_attr;
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned md_index, num_components;
    unsigned tl_index, num_tl_resources;
    unsigned cmpt_index;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_components(&uct_components, &num_components);
    if (status != UCS_OK) {
        goto out;
    }

    for (cmpt_index = 0; cmpt_index < num_components; ++cmpt_index) {

        component_attr.field_mask = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCE_COUNT;
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_release_components_list;
        }

        /* BUG FIX: was alloca() inside the loop, which accumulates stack
         * usage across iterations; use heap allocation instead */
        md_resources = malloc(sizeof(*md_resources) *
                              component_attr.md_resource_count);
        if (md_resources == NULL) {
            status = UCS_ERR_NO_MEMORY;
            goto out_release_components_list;
        }

        component_attr.field_mask   = UCT_COMPONENT_ATTR_FIELD_MD_RESOURCES;
        component_attr.md_resources = md_resources;
        status = uct_component_query(uct_components[cmpt_index], &component_attr);
        if (status != UCS_OK) {
            goto out_free_md_resources;
        }

        for (md_index = 0; md_index < component_attr.md_resource_count; ++md_index) {
            status = uct_md_config_read(uct_components[cmpt_index], NULL, NULL,
                                        &md_config);
            if (status != UCS_OK) {
                goto out_free_md_resources;
            }

            ucs_strncpy_zero(perf->params.uct.md_name,
                             component_attr.md_resources[md_index].md_name,
                             UCT_MD_NAME_MAX);

            status = uct_md_open(uct_components[cmpt_index],
                                 component_attr.md_resources[md_index].md_name,
                                 md_config, &md);
            uct_config_release(md_config);
            if (status != UCS_OK) {
                goto out_free_md_resources;
            }

            status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
            if (status != UCS_OK) {
                uct_md_close(md);
                goto out_free_md_resources;
            }

            for (tl_index = 0; tl_index < num_tl_resources; ++tl_index) {
                if (!strcmp(perf->params.uct.tl_name, tl_resources[tl_index].tl_name) &&
                    !strcmp(perf->params.uct.dev_name, tl_resources[tl_index].dev_name))
                {
                    /* Found the requested transport/device - keep this MD open */
                    uct_release_tl_resource_list(tl_resources);
                    perf->uct.cmpt = uct_components[cmpt_index];
                    perf->uct.md   = md;
                    status         = UCS_OK;
                    goto out_free_md_resources;
                }
            }

            uct_md_close(md);
            uct_release_tl_resource_list(tl_resources);
        }

        free(md_resources);
    }

    ucs_error("Cannot use "UCT_PERF_TEST_PARAMS_FMT,
              UCT_PERF_TEST_PARAMS_ARG(&perf->params));
    status = UCS_ERR_NO_DEVICE;
    goto out_release_components_list;

out_free_md_resources:
    free(md_resources);
out_release_components_list:
    uct_release_component_list(uct_components);
out:
    return status;
}
/* RTE barrier across all processes; the UCT worker is passed as a progress
 * callback so communication keeps advancing while we wait. */
void uct_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
             (void*)perf->uct.worker);
}
/* RTE barrier across all processes, progressing the calling thread's UCP
 * worker while waiting. With OpenMP each thread progresses its own worker;
 * otherwise the single worker at index 0 is used. */
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
#if _OPENMP
             (void*)perf->ucp.tctx[omp_get_thread_num()].perf.ucp.worker);
#else
             (void*)perf->ucp.tctx[0].perf.ucp.worker);
#endif
}
/* Bring up the whole UCT side of the test: async context, worker, MD,
 * interface, test memory, and endpoints to all peers. On failure each
 * partially-created resource is released in reverse order via the goto
 * cleanup chain. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf)
{
    ucx_perf_params_t *params = &perf->params;
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .field_mask           = UCT_IFACE_PARAM_FIELD_OPEN_MODE   |
                                UCT_IFACE_PARAM_FIELD_STATS_ROOT  |
                                UCT_IFACE_PARAM_FIELD_RX_HEADROOM |
                                UCT_IFACE_PARAM_FIELD_CPU_MASK,
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };
    UCS_CPU_ZERO(&iface_params.cpu_mask);
    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }
    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }
    /* open the MD which owns the requested transport/device */
    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }
    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }
    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }
    /* the local capability-check result is merged with the peers' below,
     * so all ranks agree on whether the test can run */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface,
                                              perf->uct.md);
    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, status);
    if (status != UCS_OK) {
        goto out_iface_close;
    }
    status = uct_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        goto out_iface_close;
    }
    /* Enable progress before `uct_iface_flush` and `uct_worker_progress` called
     * to give a chance to finish connection for some tranports (ib/ud, tcp).
     * They may return UCS_INPROGRESS from `uct_iface_flush` when connections are
     * in progress */
    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }
    return UCS_OK;
out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Release all UCT resources created by uct_perf_setup(), in reverse order
 * of creation: endpoints, memory, interface, MD, worker, async context. */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
static void ucp_perf_request_init(void *req)
{
ucp_perf_request_t *request = req;
request->context = NULL;
}
/*
 * Bring up the whole UCP side of the test: context, test memory, one worker
 * per thread (each thread context gets a copy of *perf with its own slice of
 * the send/receive buffers), and endpoints to the remote peer.
 * On failure each partially-created resource is released in reverse order.
 */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;
    unsigned i, thread_count;
    size_t message_size;

    ucp_params.field_mask   = UCP_PARAM_FIELD_FEATURES |
                              UCP_PARAM_FIELD_REQUEST_SIZE |
                              UCP_PARAM_FIELD_REQUEST_INIT;
    ucp_params.features     = 0;
    ucp_params.request_size = sizeof(ucp_perf_request_t);
    ucp_params.request_init = ucp_perf_request_init;

    if (perf->params.thread_count > 1) {
        /* when there is more than one thread, a ucp_worker would be created for
         * each. all of them will share the same ucp_context */
        ucp_params.features          |= UCP_PARAM_FIELD_MT_WORKERS_SHARED;
        ucp_params.mt_workers_shared  = 1;
    }

    status = ucp_perf_test_fill_params(&perf->params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    thread_count = perf->params.thread_count;
    message_size = ucx_perf_get_message_size(&perf->params);

    status = ucp_perf_test_alloc_mem(perf);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_cleanup;
    }

    perf->ucp.tctx = calloc(thread_count, sizeof(ucx_perf_thread_context_t));
    if (perf->ucp.tctx == NULL) {
        ucs_warn("ucp test failed to allocate memory for thread contexts");
        /* BUG FIX: status was not set here, so allocation failure could
         * return a stale UCS_OK */
        status = UCS_ERR_NO_MEMORY;
        goto err_free_mem;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = perf->params.thread_mode;

    for (i = 0; i < thread_count; i++) {
        perf->ucp.tctx[i].tid  = i;
        perf->ucp.tctx[i].perf = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        perf->ucp.tctx[i].perf.send_buffer =
            UCS_PTR_BYTE_OFFSET(perf->send_buffer, i * message_size);
        perf->ucp.tctx[i].perf.recv_buffer =
            UCS_PTR_BYTE_OFFSET(perf->recv_buffer, i * message_size);

        status = ucp_worker_create(perf->ucp.context, &worker_params,
                                   &perf->ucp.tctx[i].perf.ucp.worker);
        if (status != UCS_OK) {
            goto err_free_tctx_destroy_workers;
        }
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_tctx_destroy_workers;
    }

    return UCS_OK;

err_free_tctx_destroy_workers:
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
err_free_mem:
    ucp_perf_test_free_mem(perf);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Release all UCP resources created by ucp_perf_setup(): endpoints (after a
 * barrier), test memory, per-thread workers, thread contexts, and finally
 * the UCP context. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_perf_test_destroy_workers(perf);
    free(perf->ucp.tctx);
    ucp_cleanup(perf->ucp.context);
}
/* Per-API operation table, indexed by the test's API selector
 * (UCX_PERF_API_UCT / UCX_PERF_API_UCP). */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);   /* create all resources */
    void         (*cleanup)(ucx_perf_context_t *perf); /* release all resources */
    ucs_status_t (*run)(ucx_perf_context_t *perf);     /* execute the benchmark */
    void         (*barrier)(ucx_perf_context_t *perf); /* sync with peers */
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/* Public entry point: validate parameters, set up the selected API (UCT or
 * UCP), run an optional warmup pass followed by the measured run, report the
 * result through the RTE, and clean everything up.
 * Multi-threaded runs are delegated to ucx_perf_thread_spawn(). */
ucs_status_t ucx_perf_run(const ucx_perf_params_t *params,
                          ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;
    ucx_perf_global_init();
    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }
    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }
    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }
    /* ucx_perf_test_init selects perf->allocator from the requested memory
     * types; NULL means the send/recv memory-type pair is unsupported */
    ucx_perf_test_init(perf, params);
    if (perf->allocator == NULL) {
        ucs_error("Unsupported memory types %s<->%s",
                  ucs_memory_type_names[params->send_mem_type],
                  ucs_memory_type_names[params->recv_mem_type]);
        status = UCS_ERR_UNSUPPORTED;
        goto out_free;
    }
    if ((params->api == UCX_PERF_API_UCT) &&
        (perf->allocator->mem_type != UCS_MEMORY_TYPE_HOST)) {
        ucs_warn("UCT tests also copy 2-byte values from %s memory to "
                 "%s memory, which may impact performance results",
                 ucs_memory_type_names[perf->allocator->mem_type],
                 ucs_memory_type_names[UCS_MEMORY_TYPE_HOST]);
    }
    status = perf->allocator->init(perf);
    if (status != UCS_OK) {
        goto out_free;
    }
    status = ucx_perf_funcs[params->api].setup(perf);
    if (status != UCS_OK) {
        goto out_free;
    }
    if (params->thread_count == 1) {
        if (params->api == UCX_PERF_API_UCP) {
            /* single-threaded UCP: expose thread 0's handles at top level */
            perf->ucp.worker      = perf->ucp.tctx[0].perf.ucp.worker;
            perf->ucp.ep          = perf->ucp.tctx[0].perf.ucp.ep;
            perf->ucp.remote_addr = perf->ucp.tctx[0].perf.ucp.remote_addr;
            perf->ucp.rkey        = perf->ucp.tctx[0].perf.ucp.rkey;
        }
        if (params->warmup_iter > 0) {
            /* warmup: shortened run whose results are discarded */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_prepare_new_run(perf, params);
        }
        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1, 0);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }
out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* Per-thread test body for multi-threaded runs: optional warmup pass, then
 * the measured run; the result is stored into the thread's own
 * tctx->result. Called from inside an OpenMP parallel region. */
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx   = (ucx_perf_thread_context_t*) arg; /* a single thread context */
    ucx_perf_result_t* result         = &tctx->result;
    ucx_perf_context_t* perf          = &tctx->perf;
    ucx_perf_params_t* params         = &perf->params;
    ucs_status_t status;
    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (UCS_OK != status) {
            goto out;
        }
        ucx_perf_test_prepare_new_run(perf, params);
    }
    /* Run test */
/* all threads start the measured run together */
#pragma omp barrier
    status = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    if (UCS_OK != status) {
        goto out;
    }
    ucx_perf_calc_result(perf, result);
out:
    return status;
}
/*
 * Combine the per-thread results of a multi-threaded run into one report:
 * bandwidth and message rate are summed across threads, latency is averaged,
 * and the iteration/byte/time counters are taken from thread 0.
 */
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
    ucx_perf_thread_context_t* threads = perf->ucp.tctx; /* all the thread contexts on perf */
    unsigned idx, nthreads = perf->params.thread_count;
    double latency_sum = 0.0;
    ucx_perf_result_t total;

    total.iters        = threads[0].result.iters;
    total.bytes        = threads[0].result.bytes;
    total.elapsed_time = threads[0].result.elapsed_time;

    /* 'typical' is only meaningful for latency runs, and moment averages are
     * undefined for multi-threaded runs (the last iteration's values are not
     * captured) - report them all as zero */
    total.bandwidth.typical        = 0.0;
    total.bandwidth.moment_average = 0.0;
    total.msgrate.typical          = 0.0;
    total.msgrate.moment_average   = 0.0;
    total.latency.typical          = 0.0;
    total.latency.moment_average   = 0.0;

    /* Sum BW and message rate over threads; accumulate latency for the mean */
    total.bandwidth.total_average = 0.0;
    total.msgrate.total_average   = 0.0;
    for (idx = 0; idx < nthreads; idx++) {
        total.bandwidth.total_average += threads[idx].result.bandwidth.total_average;
        total.msgrate.total_average   += threads[idx].result.msgrate.total_average;
        latency_sum                   += threads[idx].result.latency.total_average;
    }
    total.latency.total_average = latency_sum / nthreads;

    rte_call(perf, report, &total, perf->params.report_arg, 1, 1);
}
/*
 * Run the test on params.thread_count OpenMP threads, one thread context per
 * thread, then aggregate and report the combined results.
 * Returns UCS_OK if all threads succeeded, otherwise the status of the last
 * failing thread. 'result' is unused here; results are reported via the RTE.
 */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                          ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
    int ti, thread_count = perf->params.thread_count;
    ucs_status_t status = UCS_OK;

    omp_set_num_threads(thread_count);

    /* Each thread stores its own status in tctx[ti].status.
     * (BUG FIX: removed the 'statuses' array, which was allocated and freed
     * but never read or written - dead code.) */
#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    for (ti = 0; ti < thread_count; ti++) {
        if (UCS_OK != tctx[ti].status) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(tctx[ti].status));
            status = tctx[ti].status;
        }
    }

    ucx_perf_thread_report_aggregated_results(perf);

    return status;
}
#else
/* Fallback when compiled without OpenMP (_OPENMP not defined): thread mode
 * cannot be supported, so reject the request with an invalid-parameter
 * error instead of running the test. */
static ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result) {
ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
/* One-time global initialization: registers the host-memory allocator in
 * the per-memory-type allocator table and loads the perftest extension
 * modules (e.g. allocators for other memory types). */
void ucx_perf_global_init()
{
/* Allocator vtable for plain host memory; function-pointer table used by
 * both the UCP and UCT test paths. */
static ucx_perf_allocator_t host_allocator = {
.mem_type = UCS_MEMORY_TYPE_HOST,
.init = ucs_empty_function_return_success,
.ucp_alloc = ucp_perf_test_alloc_host,
.ucp_free = ucp_perf_test_free_host,
.uct_alloc = uct_perf_test_alloc_host,
.uct_free = uct_perf_test_free_host,
.memcpy = ucx_perf_test_memcpy_host,
.memset = memset
};
UCS_MODULE_FRAMEWORK_DECLARE(ucx_perftest);
ucx_perf_mem_type_allocators[UCS_MEMORY_TYPE_HOST] = &host_allocator;
/* FIXME Memtype allocator modules must be loaded to global scope, otherwise
 * alloc hooks, which are using dlsym() to get pointer to original function,
 * do not work. Need to use bistro for memtype hooks to fix it.
 */
UCS_MODULE_FRAMEWORK_LOAD(ucx_perftest, UCS_MODULE_LOAD_FLAG_GLOBAL);
}
|
threadpool.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#else
#pragma warning(push)
#pragma warning(disable : 4267)
#endif
#include <unsupported/Eigen/CXX11/ThreadPool>
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#else
#pragma warning(pop)
#endif
namespace onnxruntime {
namespace concurrency {
/**
 * Generic class for instantiating thread pools, backed by Eigen's
 * ThreadPool implementation.
 * Don't put any object of this type into a global variable in a Win32 DLL.
 */
class ThreadPool {
public:
/*
Initializes a thread pool given the current environment.
*/
ThreadPool(const std::string& name, int num_threads);
/*
Enqueue a unit of work.
*/
void Schedule(std::function<void()> fn);
/*
Schedule work in the interval [0, total).
*/
void ParallelFor(int32_t total, std::function<void(int32_t)> fn);
/*
Schedule work in the interval [0, total), with calls split into (num_batches) batches.
*/
void BatchParallelFor(int32_t total, std::function<void(int32_t)> fn, int32_t num_batches = 0);
/*
Schedule work in the interval [first, last].
*/
void ParallelForRange(int64_t first, int64_t last, std::function<void(int64_t, int64_t)> fn);
// This is not supported until the latest Eigen
// void SetStealPartitions(const std::vector<std::pair<unsigned, unsigned>>& partitions);
/**
Tries to call the given function in parallel, with calls split into (num_batches) batches.
If tp is null, falls back to OpenMP (when USE_OPENMP is defined) or a plain
serial loop; note num_batches is ignored on that fallback path.
**/
template <typename F>
inline static void TryBatchParallelFor(concurrency::ThreadPool* tp, int32_t total, F&& fn, int32_t num_batches = 0) {
if (tp != nullptr) {
// Default batch count: one batch per pool thread plus one for the caller.
if (num_batches <= 0) {
num_batches = tp->NumThreads() + 1;
}
tp->BatchParallelFor(total, std::forward<F>(fn), num_batches);
} else {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int32_t i = 0; i < total; ++i) {
fn(i);
}
}
}
/**
Tries to call the given function in parallel.
If tp is null, falls back to OpenMP (when USE_OPENMP is defined) or a plain
serial loop.
**/
template <typename F>
inline static void TryParallelFor(concurrency::ThreadPool* tp, int32_t total, F&& fn) {
if (tp != nullptr) {
tp->ParallelFor(total, std::forward<F>(fn));
} else {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int32_t i = 0; i < total; ++i) {
fn(i);
}
}
}
// Number of threads in the pool.
int NumThreads() const;
// Id of the calling thread within the pool. NOTE(review): value for threads
// outside the pool depends on Eigen's implementation - confirm before use.
int CurrentThreadId() const;
// Direct access to the underlying Eigen thread pool.
Eigen::ThreadPool& GetHandler() { return impl_; }
private:
Eigen::ThreadPool impl_; // underlying Eigen thread pool
};
} // namespace concurrency
} // namespace onnxruntime
|
thread_thread_threadid.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt
#include "omp_testsuite.h"
#include <string.h>
#include <stdio.h>
/*
 * Runs a two-level nested OpenMP parallel-for (num_threads threads at each
 * level) and verifies that the OpenMP thread id and the Argobots ULT handle
 * of each worker remain stable across context switches (#pragma omp
 * taskyield and ABT_thread_yield).
 *
 * Each outer worker accumulates a bitmask in vals[i]:
 *   +1  all inner workers passed their checks
 *   +2  outer OpenMP thread id unchanged
 *   +4  outer Argobots thread handle unchanged
 * Success requires every slot to equal 7.
 *
 * Returns 1 on success, 0 on failure.
 */
int test_thread_thread_threadid(int num_threads) {
  int i, vals[num_threads];
  memset(vals, 0, sizeof(int) * num_threads);
  omp_set_max_active_levels(2); /* enable the nested parallel region */
#pragma omp parallel for num_threads(num_threads)
  for (i = 0; i < num_threads; i++) {
    int omp_thread_id = omp_get_thread_num();
    ABT_thread abt_thread;
    ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread));
    int local_vals[num_threads];
    memset(local_vals, 0, sizeof(int) * num_threads);
    int j;
#pragma omp parallel for num_threads(num_threads)
    for (j = 0; j < num_threads; j++) {
      int l2_omp_thread_id = omp_get_thread_num();
      ABT_thread l2_abt_thread;
      ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread));
      // Context switching in OpenMP.
#pragma omp taskyield
      int l2_omp_thread_id2 = omp_get_thread_num();
      if (l2_omp_thread_id == l2_omp_thread_id2) {
        local_vals[j] += 1;
      }
      ABT_thread l2_abt_thread2;
      ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread2));
      ABT_bool l2_abt_thread_equal;
      ABT_EXIT_IF_FAIL(ABT_thread_equal(l2_abt_thread, l2_abt_thread2,
                                        &l2_abt_thread_equal));
      if (l2_abt_thread_equal == ABT_TRUE) {
        local_vals[j] += 2;
      }
      // Context switching in Argobots.
      ABT_EXIT_IF_FAIL(ABT_thread_yield());
      int l2_omp_thread_id3 = omp_get_thread_num();
      if (l2_omp_thread_id2 == l2_omp_thread_id3) {
        local_vals[j] += 4;
      }
    }
    // Check child threads.
    // BUG FIX: the original indexed local_vals with the outer index 'i',
    // so only one slot was ever inspected; use the inner index 'j'.
    int child_fail = 0;
    for (j = 0; j < num_threads; j++) {
      if (local_vals[j] != 7) {
        child_fail = 1;
      }
    }
    if (!child_fail) {
      vals[i] += 1;
    }
    int omp_thread_id2 = omp_get_thread_num();
    if (omp_thread_id == omp_thread_id2) {
      vals[i] += 2;
    }
    ABT_thread abt_thread2;
    ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2));
    ABT_bool abt_thread_equal;
    ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2,
                                      &abt_thread_equal));
    if (abt_thread_equal == ABT_TRUE) {
      vals[i] += 4;
    }
  }
  for (i = 0; i < num_threads; i++) {
    if (vals[i] != 7) {
      printf("vals[%d] == %d\n", i, vals[i]);
      return 0;
    }
  }
  return 1;
}
/* Driver: exercise the test for every thread count from 1 to REPETITIONS
 * and report the number of failing runs as the exit status. */
int main() {
  int num_failed = 0;
  int t;
  for (t = 1; t <= REPETITIONS; t++) {
    if (!test_thread_thread_threadid(t)) {
      num_failed++;
    }
  }
  return num_failed;
}
|
SSE41search.c | #include "SSE41search.h"
// CPU search using SSE4.1 instructions and the Score Profile technique.
//
// For every (query, vectorized database sequence) pair this computes an
// alignment score with affine gap penalties (open_gap/extend_gap) while
// tracking a running maximum. The recurrence resembles a Smith-Waterman
// local alignment -- NOTE(review): confirm against the project docs; the
// max-with-zero clamp is commented out in the 8/16-bit passes below.
//
// Scores are computed in 8-bit saturated precision first; vector halves
// whose score saturates at +127 are recomputed in 16-bit precision, and
// quarters saturating at +32767 are recomputed again in 32-bit precision.
// Final 32-bit scores are written to 'scores'; elapsed wall-clock time to
// '*workTime'. The (query x db) task space is distributed over 'n_threads'
// OpenMP threads with dynamic scheduling.
void search_sse41_sp (char * query_sequences, unsigned short int * query_sequences_lengths, unsigned long int query_sequences_count, unsigned int * query_disp,
char * vect_sequences_db, unsigned short int * vect_sequences_db_lengths, unsigned short int * vect_sequences_db_blocks, unsigned long int vect_sequences_db_count,
unsigned long int * vect_sequences_db_disp, char * submat, int open_gap, int extend_gap, int n_threads, int block_size, int * scores, double * workTime){
long int i, j, k;
double tick;
char *a, * b;
unsigned int * a_disp;
unsigned long int * b_disp = NULL;
unsigned short int * m, *n, *nbbs, sequences_db_max_length, query_sequences_max_length;
// aliases for the query/database arrays; the last entry of each lengths
// array is used as the maximum length (assumes lengths sorted ascending --
// TODO confirm with the preprocessing code)
a = query_sequences;
m = query_sequences_lengths;
a_disp = query_disp;
query_sequences_max_length = query_sequences_lengths[query_sequences_count-1];
sequences_db_max_length = vect_sequences_db_lengths[vect_sequences_db_count-1];
b = vect_sequences_db;
n = vect_sequences_db_lengths;
nbbs = vect_sequences_db_blocks;
b_disp = vect_sequences_db_disp;
tick = dwalltime();
#pragma omp parallel default(none) shared(block_size, a, b, n, nbbs, m, a_disp, b_disp, submat, scores, query_sequences_count, vect_sequences_db_count, open_gap, extend_gap, sequences_db_max_length, query_sequences_max_length) num_threads(n_threads)
{
// per-thread working set: DP row buffers, column/row maxima, and the
// per-block score profile (substitution scores pre-gathered per residue)
__m128i *row1, *row2, *maxCol, *maxRow, *lastCol, * ptr_scores, *tmp;
__m128i*ptr_scoreProfile1, *ptr_scoreProfile2, *ptr_scoreProfile3, *ptr_scoreProfile4;
char * ptr_a, * ptr_b, * scoreProfile;
__declspec(align(MEMALIGN)) __m128i score, auxBlosum[2], auxLastCol, b_values;
__declspec(align(MEMALIGN)) __m128i current1, current2, current3, current4, previous2, previous3, previous4;
__declspec(align(MEMALIGN)) __m128i aux0, aux1, aux2, aux3, aux4, aux5, aux6, aux7, aux8;
__declspec(align(MEMALIGN)) __m128i vextend_gap_epi8 = _mm_set1_epi8(extend_gap), vopen_extend_gap_epi8 = _mm_set1_epi8(open_gap+extend_gap), vzero_epi8 = _mm_set1_epi8(0);
__declspec(align(MEMALIGN)) __m128i vextend_gap_epi16 = _mm_set1_epi16(extend_gap), vopen_extend_gap_epi16 = _mm_set1_epi16(open_gap+extend_gap), vzero_epi16 = _mm_set1_epi16(0);
__declspec(align(MEMALIGN)) __m128i vextend_gap_epi32 = _mm_set1_epi32(extend_gap), vopen_extend_gap_epi32 = _mm_set1_epi32(open_gap+extend_gap), vzero_epi32 = _mm_set1_epi32(0);
// SP: constants for splitting substitution-matrix row lookups into the
// low (index < 16) and high (index >= 16) 16-byte halves
__declspec(align(MEMALIGN)) __m128i v15 = _mm_set1_epi8(15), v16 = _mm_set1_epi8(16), vneg32 = _mm_set1_epi8(-32);
// overflow: saturation sentinels for the 8- and 16-bit passes
__declspec(align(MEMALIGN)) __m128i v127 = _mm_set1_epi8(127), v32767 = _mm_set1_epi16(32767);
// bias: added back when widening biased 8/16-bit scores to 32 bits
__declspec(align(MEMALIGN)) __m128i v128 = _mm_set1_epi32(128), v32768 = _mm_set1_epi32(32768);
unsigned int i, j, ii, jj, k, disp_1, disp_2, disp_3, disp_4, disp_5, dim1, dim2, nbb;
unsigned long int t, s, q;
int overflow_flag, bb1, bb1_start, bb1_end, bb2, bb2_start, bb2_end;
// allocate memory for auxiliary buffers
row1 = (__m128i *) _mm_malloc((block_size+1)*sizeof(__m128i), MEMALIGN);
row2 = (__m128i *) _mm_malloc((block_size+1)*sizeof(__m128i), MEMALIGN);
maxCol = (__m128i *) _mm_malloc((block_size+1)*sizeof(__m128i), MEMALIGN);
maxRow = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), MEMALIGN);
lastCol = (__m128i *) _mm_malloc((query_sequences_max_length)*sizeof(__m128i), MEMALIGN);
scoreProfile = (char *) _mm_malloc((SUBMAT_ROWS_x_SSE_INT8_VECTOR_LENGTH*block_size)*sizeof(char), MEMALIGN);
// calculate alignment score
#pragma omp for schedule(dynamic) nowait
for (t=0; t< query_sequences_count*vect_sequences_db_count; t++) {
// map the flat task index onto a (query q, db sequence s) pair, walking
// both lists from the end (longest first) for better load balance
q = (query_sequences_count-1) - (t % query_sequences_count);
s = (vect_sequences_db_count-1) - (t / query_sequences_count);
ptr_a = a + a_disp[q];
ptr_b = b + b_disp[s];
ptr_scores = (__m128i *) (scores + (q*vect_sequences_db_count+s)*SSE_INT8_VECTOR_LENGTH);
// calculate number of blocks
nbb = nbbs[s];
// init buffers
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi8(-128);
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi8(-128);
// init score to the 8-bit minimum (a +128 bias, removed on store)
score = _mm_set1_epi8(-128);
for (k=0; k < nbb; k++){
// calculate dim1: number of db columns in this block
disp_4 = k*block_size;
dim1 = n[s]-disp_4;
dim1 = (block_size < dim1 ? block_size : dim1);
// calculate dim2
dim2 = dim1 / DB_SEQ_LEN_MULT;
// calculate a[i] displacement
disp_1 = dim1*SSE_INT8_VECTOR_LENGTH;
// init buffers
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi8(-128);
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi8(-128);
auxLastCol = _mm_set1_epi8(-128);
// build score profile: gather substitution scores for each db residue
// vector via two PSHUFB lookups (low/high 16 columns of the matrix row)
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*SSE_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm_cmpgt_epi8(b_values,v15);
aux3 = _mm_and_si128(aux2,vneg32);
aux4 = _mm_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i;
#pragma unroll
for (j=0; j< SUBMAT_ROWS-1; j++) {
tmp = (__m128i *) (submat + j*SUBMAT_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
aux7 = _mm_add_epi8(aux5, aux6);
_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
}
_mm_store_si128(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1, vzero_epi8);
}
// DP sweep: process QUERY_SEQ_LEN_MULT (4) query rows at a time
for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
previous2 = lastCol[i+1];
previous3 = lastCol[i+2];
previous4 = lastCol[i+3];
// calculate score profile displacement
ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1);
ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1);
ptr_scoreProfile3 = (__m128i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1);
ptr_scoreProfile4 = (__m128i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1);
// store maxRow in auxiliary registers
aux1 = maxRow[i];
aux2 = maxRow[i+1];
aux3 = maxRow[i+2];
aux4 = maxRow[i+3];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(DB_SEQ_LEN_MULT)
for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) {
//calculate the diagonal value
current1 = _mm_adds_epi8(row1[j-1], _mm_load_si128(ptr_scoreProfile1+(j-1)));
// calculate current1 max value
current1 = _mm_max_epi8(current1, aux1);
current1 = _mm_max_epi8(current1, maxCol[j]);
//current1 = _mm_max_epi8(current1, vzero_epi8);
// update maxRow and maxCol
aux1 = _mm_subs_epi8(aux1, vextend_gap_epi8);
maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm_subs_epi8(current1, vopen_extend_gap_epi8);
aux1 = _mm_max_epi8(aux1, aux0);
maxCol[j] = _mm_max_epi8(maxCol[j], aux0);
// update max score
score = _mm_max_epi8(score,current1);
//calculate the diagonal value
current2 = _mm_adds_epi8(previous2, _mm_load_si128(ptr_scoreProfile2+(j-1)));
// update previous
previous2 = current1;
// calculate current2 max value
current2 = _mm_max_epi8(current2, aux2);
current2 = _mm_max_epi8(current2, maxCol[j]);
//current2 = _mm_max_epi8(current2, vzero_epi8);
// update maxRow and maxCol
aux2 = _mm_subs_epi8(aux2, vextend_gap_epi8);
maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm_subs_epi8(current2, vopen_extend_gap_epi8);
aux2 = _mm_max_epi8(aux2, aux0);
maxCol[j] = _mm_max_epi8(maxCol[j], aux0);
// update max score
score = _mm_max_epi8(score,current2);
//calculate the diagonal value
current3 = _mm_adds_epi8(previous3, _mm_load_si128(ptr_scoreProfile3+(j-1)));
// update previous
previous3 = current2;
// calculate current3 max value
current3 = _mm_max_epi8(current3, aux3);
current3 = _mm_max_epi8(current3, maxCol[j]);
//current3 = _mm_max_epi8(current3, vzero_epi8);
// update maxRow and maxCol
aux3 = _mm_subs_epi8(aux3, vextend_gap_epi8);
maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm_subs_epi8(current3, vopen_extend_gap_epi8);
aux3 = _mm_max_epi8(aux3, aux0);
maxCol[j] = _mm_max_epi8(maxCol[j], aux0);
// update max score
score = _mm_max_epi8(score,current3);
//calculate the diagonal value
current4 = _mm_adds_epi8(previous4, _mm_load_si128(ptr_scoreProfile4+(j-1)));
// update previous
previous4 = current3;
// calculate current4 max value
current4 = _mm_max_epi8(current4, aux4);
current4 = _mm_max_epi8(current4, maxCol[j]);
//current4 = _mm_max_epi8(current4, vzero_epi8);
// update maxRow and maxCol
aux4 = _mm_subs_epi8(aux4, vextend_gap_epi8);
maxCol[j] = _mm_subs_epi8(maxCol[j], vextend_gap_epi8);
aux0 = _mm_subs_epi8(current4, vopen_extend_gap_epi8);
aux4 = _mm_max_epi8(aux4, aux0);
maxCol[j] = _mm_max_epi8(maxCol[j], aux0);
// update max score
score = _mm_max_epi8(score,current4);
// update row buffer
row2[j] = current4;
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
maxRow[i+2] = aux3;
maxRow[i+3] = aux4;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = current1;
lastCol[i+2] = current2;
lastCol[i+3] = current3;
auxLastCol = current4;
// swap buffers
tmp = row1;
row1 = row2;
row2 = tmp;
}
}
// store max value: widen each 4-byte group of the biased 8-bit scores to
// 32 bits and remove the +128 bias
aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(score),v128);
_mm_store_si128 (ptr_scores,aux1);
aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(_mm_srli_si128(score,4)),v128);
_mm_store_si128 (ptr_scores+1,aux1);
aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(_mm_srli_si128(score,8)),v128);
_mm_store_si128 (ptr_scores+2,aux1);
aux1 = _mm_add_epi32(_mm_cvtepi8_epi32(_mm_srli_si128(score,12)),v128);
_mm_store_si128 (ptr_scores+3,aux1);
// overflow detection: any lane saturated at +127 must be recomputed
aux1 = _mm_cmpeq_epi8(score,v127);
overflow_flag = _mm_test_all_zeros(aux1,v127);
// if overflow
if (overflow_flag == 0){
// detect if overflow occurred in low-half, high-half or both halves
aux1 = _mm_cmpeq_epi8(_mm_slli_si128(score,8),v127);
bb1_start = _mm_test_all_zeros(aux1,v127);
aux1 = _mm_cmpeq_epi8(_mm_srli_si128(score,8),v127);
bb1_end = 2 - _mm_test_all_zeros(aux1,v127);
// recalculate using 16-bit signed integer precision
for (bb1=bb1_start; bb1<bb1_end ; bb1++){
// init buffers
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi16(-32768);
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi16(-32768);
// init score to the 16-bit minimum (a +32768 bias, removed on store)
score = _mm_set1_epi16(-32768);
disp_2 = bb1*SSE_INT16_VECTOR_LENGTH;
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*block_size;
dim1 = n[s]-disp_4;
dim1 = (block_size < dim1 ? block_size : dim1);
// calculate dim2
dim2 = dim1 / DB_SEQ_LEN_MULT;
// calculate a[i] displacement
disp_1 = dim1*SSE_INT8_VECTOR_LENGTH;
// init buffers
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi16(-32768);
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi16(-32768);
auxLastCol = _mm_set1_epi16(-32768);
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*SSE_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm_cmpgt_epi8(b_values,v15);
aux3 = _mm_and_si128(aux2,vneg32);
aux4 = _mm_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i;
#pragma unroll
for (j=0; j< SUBMAT_ROWS-1; j++) {
tmp = (__m128i *) (submat + j*SUBMAT_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
aux7 = _mm_add_epi8(aux5, aux6);
_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
}
_mm_store_si128(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1, vzero_epi8);
}
for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
previous2 = lastCol[i+1];
previous3 = lastCol[i+2];
previous4 = lastCol[i+3];
// calculate score profile displacement (disp_2 selects the half being redone)
ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_2);
ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_2);
ptr_scoreProfile3 = (__m128i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1+disp_2);
ptr_scoreProfile4 = (__m128i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1+disp_2);
// store maxRow in auxiliary registers
aux1 = maxRow[i];
aux2 = maxRow[i+1];
aux3 = maxRow[i+2];
aux4 = maxRow[i+3];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(DB_SEQ_LEN_MULT)
for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) {
//calculate the diagonal value
current1 = _mm_adds_epi16(row1[j-1], _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile1+(j-1))));
// calculate current1 max value
current1 = _mm_max_epi16(current1, aux1);
current1 = _mm_max_epi16(current1, maxCol[j]);
//current1 = _mm_max_epi16(current1, vzero_epi16);
// update maxRow and maxCol
aux1 = _mm_subs_epi16(aux1, vextend_gap_epi16);
maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm_subs_epi16(current1, vopen_extend_gap_epi16);
aux1 = _mm_max_epi16(aux1, aux0);
maxCol[j] = _mm_max_epi16(maxCol[j], aux0);
// update max score
score = _mm_max_epi16(score,current1);
//calculate the diagonal value
current2 = _mm_adds_epi16(previous2, _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile2+(j-1))));
// update previous
previous2 = current1;
// calculate current2 max value
current2 = _mm_max_epi16(current2, aux2);
current2 = _mm_max_epi16(current2, maxCol[j]);
//current2 = _mm_max_epi16(current2, vzero_epi16);
// update maxRow and maxCol
aux2 = _mm_subs_epi16(aux2, vextend_gap_epi16);
maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm_subs_epi16(current2, vopen_extend_gap_epi16);
aux2 = _mm_max_epi16(aux2, aux0);
maxCol[j] = _mm_max_epi16(maxCol[j], aux0);
// update max score
score = _mm_max_epi16(score,current2);
//calculate the diagonal value
current3 = _mm_adds_epi16(previous3, _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile3+(j-1))));
// update previous
previous3 = current2;
// calculate current3 max value
current3 = _mm_max_epi16(current3, aux3);
current3 = _mm_max_epi16(current3, maxCol[j]);
//current3 = _mm_max_epi16(current3, vzero_epi16);
// update maxRow and maxCol
aux3 = _mm_subs_epi16(aux3, vextend_gap_epi16);
maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm_subs_epi16(current3, vopen_extend_gap_epi16);
aux3 = _mm_max_epi16(aux3, aux0);
maxCol[j] = _mm_max_epi16(maxCol[j], aux0);
// update max score
score = _mm_max_epi16(score,current3);
//calculate the diagonal value
current4 = _mm_adds_epi16(previous4, _mm_cvtepi8_epi16(_mm_loadu_si128(ptr_scoreProfile4+(j-1))));
// update previous
previous4 = current3;
// calculate current4 max value
current4 = _mm_max_epi16(current4, aux4);
current4 = _mm_max_epi16(current4, maxCol[j]);
//current4 = _mm_max_epi16(current4, vzero_epi16);
// update maxRow and maxCol
aux4 = _mm_subs_epi16(aux4, vextend_gap_epi16);
maxCol[j] = _mm_subs_epi16(maxCol[j], vextend_gap_epi16);
aux0 = _mm_subs_epi16(current4, vopen_extend_gap_epi16);
aux4 = _mm_max_epi16(aux4, aux0);
maxCol[j] = _mm_max_epi16(maxCol[j], aux0);
// update row buffer
row2[j] = current4;
// update max score
score = _mm_max_epi16(score,current4);
}
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
maxRow[i+2] = aux3;
maxRow[i+3] = aux4;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = current1;
lastCol[i+2] = current2;
lastCol[i+3] = current3;
auxLastCol = current4;
// swap buffers
tmp = row1;
row1 = row2;
row2 = tmp;
}
}
// store max value (widen 16-bit halves to 32 bits, remove +32768 bias)
aux1 = _mm_add_epi32(_mm_cvtepi16_epi32(score),v32768);
_mm_store_si128 (ptr_scores+bb1*2,aux1);
aux1 = _mm_add_epi32(_mm_cvtepi16_epi32(_mm_srli_si128(score,8)),v32768);
_mm_store_si128 (ptr_scores+bb1*2+1,aux1);
// overflow detection
aux1 = _mm_cmpeq_epi16(score,v32767);
overflow_flag = _mm_test_all_zeros(aux1,v32767);
// if overflow
if (overflow_flag == 0){
// detect if overflow occurred in low-half, high-half or both halves
aux1 = _mm_cmpeq_epi16(_mm_slli_si128(score,8),v32767);
bb2_start = _mm_test_all_zeros(aux1,v32767);
aux1 = _mm_cmpeq_epi16(_mm_srli_si128(score,8),v32767);
bb2_end = 2 - _mm_test_all_zeros(aux1,v32767);
// recalculate using 32-bit signed integer precision
for (bb2=bb2_start; bb2<bb2_end ; bb2++){
// init buffers
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) maxRow[i] = _mm_set1_epi32(0);
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<m[q] ; i++ ) lastCol[i] = _mm_set1_epi32(0);
// set score to 0 (no bias needed at 32-bit precision)
score = _mm_set1_epi32(0);
disp_3 = disp_2 + bb2*SSE_INT32_VECTOR_LENGTH;
for (k=0; k < nbb; k++){
// calculate dim1
disp_4 = k*block_size;
dim1 = n[s]-disp_4;
dim1 = (block_size < dim1 ? block_size : dim1);
// calculate dim2
dim2 = dim1 / DB_SEQ_LEN_MULT;
// calculate a[i] displacement
disp_1 = dim1*SSE_INT8_VECTOR_LENGTH;
// init buffers
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) maxCol[i] = _mm_set1_epi32(0);
#pragma unroll(SSE_UNROLL_COUNT)
for (i=0; i<dim1+1 ; i++ ) row1[i] = _mm_set1_epi32(0);
auxLastCol = _mm_set1_epi32(0);
// build score profile
for (i=0; i< dim1 ;i++ ) {
// indexes
b_values = _mm_loadu_si128((__m128i *) (ptr_b + (disp_4+i)*SSE_INT8_VECTOR_LENGTH));
// indexes >= 16
aux1 = _mm_sub_epi8(b_values, v16);
// indexes < 16
aux2 = _mm_cmpgt_epi8(b_values,v15);
aux3 = _mm_and_si128(aux2,vneg32);
aux4 = _mm_add_epi8(b_values,aux3);
ptr_scoreProfile1 = (__m128i*)(scoreProfile) + i;
#pragma unroll
for (j=0; j< SUBMAT_ROWS-1; j++) {
tmp = (__m128i *) (submat + j*SUBMAT_COLS);
auxBlosum[0] = _mm_load_si128(tmp);
auxBlosum[1] = _mm_load_si128(tmp+1);
aux5 = _mm_shuffle_epi8(auxBlosum[0], aux4);
aux6 = _mm_shuffle_epi8(auxBlosum[1], aux1);
aux7 = _mm_add_epi8(aux5, aux6);
_mm_store_si128(ptr_scoreProfile1+j*dim1, aux7);
}
_mm_store_si128(ptr_scoreProfile1+(SUBMAT_ROWS-1)*dim1, vzero_epi8);
}
for( i = 0; i < m[q]; i+=QUERY_SEQ_LEN_MULT){
// update row[0] with lastCol[i-1]
row1[0] = lastCol[i];
previous2 = lastCol[i+1];
previous3 = lastCol[i+2];
previous4 = lastCol[i+3];
// calculate score profile displacement (disp_3 selects the quarter being redone)
ptr_scoreProfile1 = (__m128i *)(scoreProfile+((int)(ptr_a[i]))*disp_1+disp_3);
ptr_scoreProfile2 = (__m128i *)(scoreProfile+((int)(ptr_a[i+1]))*disp_1+disp_3);
ptr_scoreProfile3 = (__m128i *)(scoreProfile+((int)(ptr_a[i+2]))*disp_1+disp_3);
ptr_scoreProfile4 = (__m128i *)(scoreProfile+((int)(ptr_a[i+3]))*disp_1+disp_3);
// store maxRow in auxiliary registers
aux1 = maxRow[i];
aux2 = maxRow[i+1];
aux3 = maxRow[i+2];
aux4 = maxRow[i+3];
for (ii=0; ii<dim2 ; ii++) {
#pragma unroll(DB_SEQ_LEN_MULT)
for( j=ii*DB_SEQ_LEN_MULT+1, jj=0; jj < DB_SEQ_LEN_MULT; jj++, j++) {
//calculate the diagonal value
current1 = _mm_add_epi32(row1[j-1], _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile1+(j-1))));
// calculate current1 max value
current1 = _mm_max_epi32(current1, aux1);
current1 = _mm_max_epi32(current1, maxCol[j]);
current1 = _mm_max_epi32(current1, vzero_epi32);
// update maxRow and maxCol
aux1 = _mm_sub_epi32(aux1, vextend_gap_epi32);
maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm_sub_epi32(current1, vopen_extend_gap_epi32);
aux1 = _mm_max_epi32(aux1, aux0);
maxCol[j] = _mm_max_epi32(maxCol[j], aux0);
// update max score
score = _mm_max_epi32(score,current1);
//calculate the diagonal value
current2 = _mm_add_epi32(previous2, _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile2+(j-1))));
// update previous
previous2 = current1;
// calculate current2 max value
current2 = _mm_max_epi32(current2, aux2);
current2 = _mm_max_epi32(current2, maxCol[j]);
current2 = _mm_max_epi32(current2, vzero_epi32);
// update maxRow and maxCol
aux2 = _mm_sub_epi32(aux2, vextend_gap_epi32);
maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm_sub_epi32(current2, vopen_extend_gap_epi32);
aux2 = _mm_max_epi32(aux2, aux0);
maxCol[j] = _mm_max_epi32(maxCol[j], aux0);
// update max score
score = _mm_max_epi32(score,current2);
//calculate the diagonal value
current3 = _mm_add_epi32(previous3, _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile3+(j-1))));
// update previous
previous3 = current2;
// calculate current3 max value
current3 = _mm_max_epi32(current3, aux3);
current3 = _mm_max_epi32(current3, maxCol[j]);
current3 = _mm_max_epi32(current3, vzero_epi32);
// update maxRow and maxCol
aux3 = _mm_sub_epi32(aux3, vextend_gap_epi32);
maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm_sub_epi32(current3, vopen_extend_gap_epi32);
aux3 = _mm_max_epi32(aux3, aux0);
maxCol[j] = _mm_max_epi32(maxCol[j], aux0);
// update max score
score = _mm_max_epi32(score,current3);
//calculate the diagonal value
current4 = _mm_add_epi32(previous4, _mm_cvtepi8_epi32(_mm_loadu_si128(ptr_scoreProfile4+(j-1))));
// update previous
previous4 = current3;
// calculate current4 max value
current4 = _mm_max_epi32(current4, aux4);
current4 = _mm_max_epi32(current4, maxCol[j]);
current4 = _mm_max_epi32(current4, vzero_epi32);
// update maxRow and maxCol
aux4 = _mm_sub_epi32(aux4, vextend_gap_epi32);
maxCol[j] = _mm_sub_epi32(maxCol[j], vextend_gap_epi32);
aux0 = _mm_sub_epi32(current4, vopen_extend_gap_epi32);
aux4 = _mm_max_epi32(aux4, aux0);
maxCol[j] = _mm_max_epi32(maxCol[j], aux0);
// update row buffer
row2[j] = current4;
// update max score
score = _mm_max_epi32(score,current4); }
}
// update maxRow
maxRow[i] = aux1;
maxRow[i+1] = aux2;
maxRow[i+2] = aux3;
maxRow[i+3] = aux4;
// update lastCol
lastCol[i] = auxLastCol;
lastCol[i+1] = current1;
lastCol[i+2] = current2;
lastCol[i+3] = current3;
auxLastCol = current4;
// swap buffers
tmp = row1;
row1 = row2;
row2 = tmp;
}
}
// store max value (already unbiased 32-bit scores)
_mm_store_si128 (ptr_scores+bb1*2+bb2,score);
}
}
}
}
}
_mm_free(row1); _mm_free(row2); _mm_free(maxCol); _mm_free(maxRow); _mm_free(lastCol); _mm_free(scoreProfile);
}
*workTime = dwalltime()-tick;
}
|
residualbased_linear_strategy.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUALBASED_LINEAR_STRATEGY )
#define KRATOS_RESIDUALBASED_LINEAR_STRATEGY
// System includes
// External includes
// Project includes
#include "includes/define.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "utilities/builtin_timer.h"
//default builder and solver
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedLinearStrategy
* @ingroup KratosCore
* @brief This is a very simple strategy to solve linearly the problem
* @details As a linear strategy the check on the convergence is not done and just one non linear iteration will be performed
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedLinearStrategy
: public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions */
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedLinearStrategy);
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedLinearStrategy<TSparseSpace,TDenseSpace,TLinearSolver> ClassType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
/**
 * @brief Default constructor: builds an empty strategy with a
 * default-constructed base; members keep their in-class defaults.
 */
explicit ResidualBasedLinearStrategy() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
/**
 * @brief Constructor taking a Parameters object.
 * @param rModelPart The model part of the problem
 * @param ThisParameters The configuration parameters
 */
explicit ResidualBasedLinearStrategy(ModelPart& rModelPart, Parameters ThisParameters)
    : BaseType(rModelPart)
{
    // Merge the user-provided parameters with the defaults, then apply them
    ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(ThisParameters);

    // Start from a clean state so the calculation initializes correctly
    mInitializeWasPerformed    = false;
    mSolutionStepIsInitialized = false;

    // Propagate the relevant flags to the builder and solver: whether
    // reactions must be computed, and whether the system matrix/vectors
    // must be reshaped at each step
    GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
    GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
}
/**
* @brief Default constructor
* @param rModelPart The model part of the problem
* @param pScheme The integration scheme
* @param pNewLinearSolver The linear solver employed
* @param CalculateReactionFlag The flag for the reaction calculation
* @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
* @param CalculateNormDxFlag The flag sets if the norm of Dx is computed
* @param MoveMeshFlag The flag that allows to move the mesh
*/
explicit ResidualBasedLinearStrategy(
    ModelPart& rModelPart,
    typename TSchemeType::Pointer pScheme,
    typename TLinearSolver::Pointer pNewLinearSolver,
    bool CalculateReactionFlag = false,
    bool ReformDofSetAtEachStep = false,
    bool CalculateNormDxFlag = false,
    bool MoveMeshFlag = false
) : BaseType(rModelPart, MoveMeshFlag),
    mpScheme(pScheme),
    mReformDofSetAtEachStep(ReformDofSetAtEachStep),
    mCalculateNormDxFlag(CalculateNormDxFlag),
    mCalculateReactionsFlag(CalculateReactionFlag)
{
    KRATOS_TRY

    // No builder and solver was supplied: fall back to the block builder
    // and solver wrapping the given linear solver
    mpBuilderAndSolver = typename TBuilderAndSolverType::Pointer
    (
        new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pNewLinearSolver)
    );

    // Clean state so the first solve initializes everything
    mInitializeWasPerformed    = false;
    mSolutionStepIsInitialized = false;

    // Propagate flags to the builder and solver: whether reactions are
    // computed, and whether the system is reshaped at every step
    GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
    GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);

    // Default verbosity: only timing information is displayed
    this->SetEchoLevel(1);

    // Rebuild the system matrices at every solution step by default
    BaseType::SetRebuildLevel(1);

    KRATOS_CATCH("")
}
/**
 * @brief Constructor specifying the builder and solver
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewBuilderAndSolver The builder and solver employed (it owns the linear solver)
 * @param CalculateReactionFlag The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param CalculateNormDxFlag The flag sets if the norm of Dx is computed
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
explicit ResidualBasedLinearStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
bool CalculateReactionFlag = false,
bool ReformDofSetAtEachStep = false,
bool CalculateNormDxFlag = false,
bool MoveMeshFlag = false
) : BaseType(rModelPart, MoveMeshFlag),
mpScheme(pScheme),
mpBuilderAndSolver(pNewBuilderAndSolver),
mReformDofSetAtEachStep(ReformDofSetAtEachStep),
mCalculateNormDxFlag(CalculateNormDxFlag),
mCalculateReactionsFlag(CalculateReactionFlag)
{
KRATOS_TRY
// Set flag to start correctly the calculations
mSolutionStepIsInitialized = false;
mInitializeWasPerformed = false;
// Tells to the builder and solver if the reactions have to be Calculated or not
GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
// Tells to the Builder And Solver if the system matrix and vectors need to
// be reshaped at each step or not
GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
// Set EchoLevel to the default value (only time is displayed)
this->SetEchoLevel(1);
// By default the matrices are rebuilt at each solution step
BaseType::SetRebuildLevel(1);
KRATOS_CATCH("")
}
/**
 * @brief Deprecated constructor specifying both a linear solver and a builder and solver
 * @details Delegates to the builder-and-solver constructor; the explicit linear
 * solver argument is redundant because the builder and solver already holds one.
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed (must match the one held by pNewBuilderAndSolver)
 * @param pNewBuilderAndSolver The builder and solver employed
 * @param CalculateReactionFlag The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param CalculateNormDxFlag The flag sets if the norm of Dx is computed
 * @param MoveMeshFlag The flag that allows to move the mesh
 */
KRATOS_DEPRECATED_MESSAGE("Constructor deprecated, please use the constructor without linear solver")
explicit ResidualBasedLinearStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
bool CalculateReactionFlag = false,
bool ReformDofSetAtEachStep = false,
bool CalculateNormDxFlag = false,
bool MoveMeshFlag = false
) : ResidualBasedLinearStrategy(rModelPart, pScheme, pNewBuilderAndSolver, CalculateReactionFlag, ReformDofSetAtEachStep, CalculateNormDxFlag, MoveMeshFlag)
{
KRATOS_TRY
KRATOS_WARNING("ResidualBasedLinearStrategy") << "This constructor is deprecated, please use the constructor without linear solver" << std::endl;
// We check if the linear solver considered for the builder and solver is consistent
// NOTE(review): the message text suggests a fallback ("Considering the linear
// solver assigned to builder and solver"), but KRATOS_ERROR_IF aborts on
// mismatch — confirm the intended severity
auto p_linear_solver = pNewBuilderAndSolver->GetLinearSystemSolver();
KRATOS_ERROR_IF(p_linear_solver != pNewLinearSolver) << "Inconsistent linear solver in strategy and builder and solver. Considering the linear solver assigned to builder and solver :\n" << p_linear_solver->Info() << "\n instead of:\n" << pNewLinearSolver->Info() << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Destructor.
 * @details In trilinos third party library, the linear solver's preconditioner should be freed before the system matrix. We control the deallocation order with Clear().
 */
~ResidualBasedLinearStrategy() override
{
// If the linear solver has not been deallocated, clean it before
// deallocating mpA. This prevents a memory error with the the ML
// solver (which holds a reference to it).
// NOTE: The linear solver is hold by the B&S
auto p_builder_and_solver = this->GetBuilderAndSolver();
if (p_builder_and_solver != nullptr) {
p_builder_and_solver->Clear();
}
// Deallocating system vectors to avoid errors in MPI. Clear calls
// TrilinosSpace::Clear for the vectors, which preserves the Map of
// current vectors, performing MPI calls in the process. Due to the
// way Python garbage collection works, this may happen after
// MPI_Finalize has already been called and is an error. Resetting
// the pointers here prevents Clear from operating with the
// (now deallocated) vectors.
mpA.reset();
mpDx.reset();
mpb.reset();
// Final cleanup: runs with the system pointers already reset (see note above)
this->Clear();
}
/**
 * @brief Set method for the time scheme
 * @param pScheme The pointer to the time scheme considered
 */
void SetScheme(typename TSchemeType::Pointer pScheme)
{
mpScheme = pScheme;
};
/**
 * @brief Get method for the time scheme
 * @return mpScheme: The pointer to the time scheme considered
 */
typename TSchemeType::Pointer GetScheme()
{
return mpScheme;
};
/**
 * @brief Set method for the builder and solver
 * @note Does not propagate the current reaction/reshape flags to the new
 * builder and solver; set them again if needed
 * @param pNewBuilderAndSolver The pointer to the builder and solver considered
 */
void SetBuilderAndSolver(typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver)
{
mpBuilderAndSolver = pNewBuilderAndSolver;
};
/**
 * @brief Get method for the builder and solver
 * @return mpBuilderAndSolver: The pointer to the builder and solver considered
 */
typename TBuilderAndSolverType::Pointer GetBuilderAndSolver()
{
return mpBuilderAndSolver;
};
/**
 * @brief This method sets the flag mCalculateReactionsFlag
 * @details The flag is also forwarded to the builder and solver so both stay in sync
 * @param CalculateReactionsFlag The flag that tells if the reactions are computed
 */
void SetCalculateReactionsFlag(bool CalculateReactionsFlag)
{
mCalculateReactionsFlag = CalculateReactionsFlag;
GetBuilderAndSolver()->SetCalculateReactionsFlag(mCalculateReactionsFlag);
}
/**
 * @brief This method returns the flag mCalculateReactionsFlag
 * @return The flag that tells if the reactions are computed
 */
bool GetCalculateReactionsFlag()
{
return mCalculateReactionsFlag;
}
/**
 * @brief This method sets the flag mReformDofSetAtEachStep
 * @details The flag is also forwarded to the builder and solver as the reshape-matrix flag
 * @param Flag The flag that tells if each time step the system is rebuilt
 */
void SetReformDofSetAtEachStepFlag(bool Flag)
{
mReformDofSetAtEachStep = Flag;
GetBuilderAndSolver()->SetReshapeMatrixFlag(mReformDofSetAtEachStep);
}
/**
 * @brief This method returns the flag mReformDofSetAtEachStep
 * @return The flag that tells if each time step the system is rebuilt
 */
bool GetReformDofSetAtEachStepFlag()
{
return mReformDofSetAtEachStep;
}
/**
 * @brief It sets the level of echo for the solving strategy
 * @param Level The level to set
 * @details The level is forwarded to both the base strategy and the builder
 * and solver. The different levels of echo are:
 * - 0: Mute... no echo at all
 * - 1: Printing time and basic informations
 * - 2: Printing linear solver data
 * - 3: Print of debug informations: Echo of stiffness matrix, Dx, b...
 */
void SetEchoLevel(int Level) override
{
BaseType::SetEchoLevel(Level);
GetBuilderAndSolver()->SetEchoLevel(Level);
}
//*********************************************************************************
/**OPERATIONS ACCESSIBLE FROM THE INPUT:*/
/**
 * @brief Create method (factory): builds a new instance of this strategy
 * @param rModelPart The model part of the problem
 * @param ThisParameters The configuration parameters
 * @return A shared pointer to the newly created strategy
 */
typename BaseType::Pointer Create(
ModelPart& rModelPart,
Parameters ThisParameters
) const override
{
return Kratos::make_shared<ClassType>(rModelPart, ThisParameters);
}
/**
 * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
 values of the solution step of interest are assumed equal to the old values
 * @details Lazily initializes the strategy and the solution step, delegates
 * the prediction to the scheme, and then (re)applies master-slave constraints
 * if any exist globally, finishing with a zero-Dx Update so time derivatives
 * are recomputed consistently with the constrained values.
 */
void Predict() override
{
KRATOS_TRY
const DataCommunicator &r_comm = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator();
//OPERATIONS THAT SHOULD BE DONE ONCE - internal check to avoid repetitions
//if the operations needed were already performed this does nothing
if(mInitializeWasPerformed == false)
Initialize();
//initialize solution step
if (mSolutionStepIsInitialized == false)
InitializeSolutionStep();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
DofsArrayType& r_dof_set = GetBuilderAndSolver()->GetDofSet();
this->GetScheme()->Predict(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
// Constraints may live on other ranks: SumAll gives the global count so all
// ranks take the same branch even when the local partition has none
auto& r_constraints_array = BaseType::GetModelPart().MasterSlaveConstraints();
const int local_number_of_constraints = r_constraints_array.size();
const int global_number_of_constraints = r_comm.SumAll(local_number_of_constraints);
if(global_number_of_constraints != 0) {
const auto& rProcessInfo = BaseType::GetModelPart().GetProcessInfo();
auto it_begin = BaseType::GetModelPart().MasterSlaveConstraints().begin();
// firstprivate(it_begin): each thread gets its own copy of the base iterator
#pragma omp parallel for firstprivate(it_begin)
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_begin+i)->ResetSlaveDofs(rProcessInfo);
#pragma omp parallel for firstprivate(it_begin)
for(int i=0; i<static_cast<int>(local_number_of_constraints); ++i)
(it_begin+i)->Apply(rProcessInfo);
//the following is needed since we need to eventually compute time derivatives after applying
//Master slave relations
TSparseSpace::SetToZero(rDx);
this->GetScheme()->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
}
if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh();
KRATOS_CATCH("")
}
/**
 * @brief Initialization of member variables and prior operations
 * @details Idempotent: guarded by mInitializeWasPerformed. Initializes the
 * scheme, the elements and the conditions exactly once each (every one has
 * its own "already initialized" guard as well).
 */
void Initialize() override
{
KRATOS_TRY
if (mInitializeWasPerformed == false)
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
//Initialize The Scheme - OPERATIONS TO BE DONE ONCE
if (p_scheme->SchemeIsInitialized() == false)
p_scheme->Initialize(BaseType::GetModelPart());
//Initialize The Elements - OPERATIONS TO BE DONE ONCE
if (p_scheme->ElementsAreInitialized() == false)
p_scheme->InitializeElements(BaseType::GetModelPart());
//Initialize The Conditions - OPERATIONS TO BE DONE ONCE
if (p_scheme->ConditionsAreInitialized() == false)
p_scheme->InitializeConditions(BaseType::GetModelPart());
mInitializeWasPerformed = true;
}
KRATOS_CATCH("")
}
/**
 * @brief The problem of interest is solved
 * @details Delegates the solve to the base class, then optionally computes
 * the two-norm of the correction vector Dx.
 * @return norm(Dx) if mCalculateNormDxFlag is true, else 0.0
 */
double Solve() override
{
    BaseType::Solve();
    // Only pay for the norm computation when it was requested
    return mCalculateNormDxFlag ? TSparseSpace::TwoNorm(*mpDx) : 0.00;
}
/**
 * @brief Clears the internal storage
 * @details Resets the DOF-set flag and clears the builder and solver (which
 * also releases the linear solver it holds), deallocates the system matrix
 * and vectors, clears the scheme, and resets the initialization flags so the
 * next step rebuilds everything.
 * @note NULL could be changed to nullptr in the future (c++11)
 */
void Clear() override
{
    KRATOS_TRY;
    // Setting to zero the internal flag to ensure that the dof sets are recalculated. Also clear the linear solver stored in the B&S
    auto p_builder_and_solver = GetBuilderAndSolver();
    if (p_builder_and_solver != nullptr) {
        p_builder_and_solver->SetDofSetIsInitializedFlag(false);
        p_builder_and_solver->Clear();
    }
    // Clearing the system of equations
    if (mpA != nullptr)
        SparseSpaceType::Clear(mpA);
    if (mpDx != nullptr)
        SparseSpaceType::Clear(mpDx);
    if (mpb != nullptr)
        SparseSpaceType::Clear(mpb);
    // Clearing scheme (use the cached pointer instead of fetching it a second time)
    auto p_scheme = GetScheme();
    if (p_scheme != nullptr) {
        p_scheme->Clear();
    }
    mInitializeWasPerformed = false;
    mSolutionStepIsInitialized = false;
    KRATOS_CATCH("");
}
/**
 * @brief This operations should be called before printing the results when non trivial results (e.g. stresses)
 need to be calculated given the solution of the step
 *@details This operations should be called only when needed, before printing as it can involve a non negligible cost
 * @note Dereferences mpA/mpDx/mpb: the solution step must be initialized before calling
 */
void CalculateOutputData() override
{
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
GetScheme()->CalculateOutputData(BaseType::GetModelPart(),
GetBuilderAndSolver()->GetDofSet(),
rA, rDx, rb);
}
/**
 * @brief Performs all the required operations that should be done (for each step) before solving the solution step.
 * @details A member variable (mSolutionStepIsInitialized) is used as a flag to make sure this function is called only once per step.
 * Sets up the DOF set and resizes the system only when not yet initialized or
 * when mReformDofSetAtEachStep is true; each phase is timed and logged when
 * the echo level is above 0.
 * @todo Boost dependencies should be replaced by std equivalent
 */
void InitializeSolutionStep() override
{
KRATOS_TRY
if (mSolutionStepIsInitialized == false)
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
//set up the system, operation performed just once unless it is required
//to reform the dof set at each iteration
BuiltinTimer system_construction_time;
if (p_builder_and_solver->GetDofSetIsInitializedFlag() == false ||
mReformDofSetAtEachStep == true)
{
//setting up the list of the DOFs to be solved
BuiltinTimer setup_dofs_time;
p_builder_and_solver->SetUpDofSet(p_scheme, BaseType::GetModelPart());
KRATOS_INFO_IF("ResidualBasedLinearStrategy", BaseType::GetEchoLevel() > 0) << "Setup Dofs Time" << setup_dofs_time.ElapsedSeconds() << std::endl;
//shaping correctly the system
BuiltinTimer setup_system_time;
p_builder_and_solver->SetUpSystem(BaseType::GetModelPart());
KRATOS_INFO_IF("ResidualBasedLinearStrategy", BaseType::GetEchoLevel() > 0) << "Setup System Time" << setup_system_time.ElapsedSeconds() << std::endl;
//setting up the Vectors involved to the correct size
BuiltinTimer system_matrix_resize_time;
p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, mpA, mpDx, mpb,
BaseType::GetModelPart());
KRATOS_INFO_IF("ResidualBasedLinearStrategy", BaseType::GetEchoLevel() > 0) << "System Matrix Resize Time" << system_matrix_resize_time.ElapsedSeconds() << std::endl;
}
KRATOS_INFO_IF("ResidualBasedLinearStrategy", BaseType::GetEchoLevel() > 0) << "System Construction Time" << system_construction_time.ElapsedSeconds() << std::endl;
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
//initial operations ... things that are constant over the Solution Step
p_builder_and_solver->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
//initial operations ... things that are constant over the Solution Step
p_scheme->InitializeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
mSolutionStepIsInitialized = true;
}
KRATOS_CATCH("")
}
/**
 * @brief Performs all the required operations that should be done (for each step) after solving the solution step.
 * @details Notifies scheme and builder and solver, cleans scheme memory,
 * resets mSolutionStepIsInitialized, and deallocates the system when the DOF
 * set is reformed each step.
 */
void FinalizeSolutionStep() override
{
KRATOS_TRY;
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
TSystemMatrixType &rA = *mpA;
TSystemVectorType &rDx = *mpDx;
TSystemVectorType &rb = *mpb;
//Finalisation of the solution step,
//operations to be done after achieving convergence, for example the
//Final Residual Vector (mb) has to be saved in there
//to avoid error accumulation
p_scheme->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
p_builder_and_solver->FinalizeSolutionStep(BaseType::GetModelPart(), rA, rDx, rb);
//Cleaning memory after the solution
p_scheme->Clean();
//reset flags for next step
mSolutionStepIsInitialized = false;
//deallocate the systemvectors if needed
if (mReformDofSetAtEachStep == true)
{
// NOTE(review): these three Clear calls are repeated inside this->Clear()
// below; the explicit calls appear redundant — confirm before removing
SparseSpaceType::Clear(mpA);
SparseSpaceType::Clear(mpDx);
SparseSpaceType::Clear(mpb);
this->Clear();
}
KRATOS_CATCH("");
}
/**
 * @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
 * @details Builds and solves the full system when a rebuild is required
 * (mRebuildLevel > 0 or the stiffness matrix has never been built); otherwise
 * only the RHS is rebuilt and solved with the existing LHS. Afterwards the
 * DOFs are updated, the mesh optionally moved, and reactions computed if
 * requested.
 * @return Always true (a linear solve is assumed to succeed)
 */
bool SolveSolutionStep() override
{
//pointers needed in the solution
typename TSchemeType::Pointer p_scheme = GetScheme();
typename TBuilderAndSolverType::Pointer p_builder_and_solver = GetBuilderAndSolver();
TSystemMatrixType& rA = *mpA;
TSystemVectorType& rDx = *mpDx;
TSystemVectorType& rb = *mpb;
p_scheme->InitializeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb);
if (BaseType::mRebuildLevel > 0 || BaseType::mStiffnessMatrixIsBuilt == false)
{
// Full rebuild: zero the system and rebuild LHS + RHS before solving
TSparseSpace::SetToZero(rA);
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
// passing smart pointers instead of references here
// to prevent dangling pointer to system matrix when
// reusing ml preconditioners in the trilinos tpl
p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);
BaseType::mStiffnessMatrixIsBuilt = true;
}
else
{
// LHS reused: only the RHS is rebuilt and solved
TSparseSpace::SetToZero(rDx);
TSparseSpace::SetToZero(rb);
p_builder_and_solver->BuildRHSAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);
}
// Debugging info
EchoInfo();
//update results
DofsArrayType& r_dof_set = p_builder_and_solver->GetDofSet();
p_scheme->Update(BaseType::GetModelPart(), r_dof_set, rA, rDx, rb);
//move the mesh if needed
if (BaseType::MoveMeshFlag() == true) BaseType::MoveMesh();
p_scheme->FinalizeNonLinIteration(BaseType::GetModelPart(), rA, rDx, rb);
// Calculate reactions if required
if (mCalculateReactionsFlag == true)
p_builder_and_solver->CalculateReactions(p_scheme,
BaseType::GetModelPart(),
rA, rDx, rb);
return true;
}
/**
 * @brief This method returns the LHS matrix
 * @return Reference to the LHS matrix of the system
 */
TSystemMatrixType& GetSystemMatrix() override
{
    return *mpA;
}
/**
 * @brief This method returns the RHS vector
 * @return Reference to the RHS vector of the system
 */
TSystemVectorType& GetSystemVector() override
{
    return *mpb;
}
/**
 * @brief This method returns the solution vector
 * @return Reference to the solution increment vector Dx
 */
TSystemVectorType& GetSolutionVector() override
{
    return *mpDx;
}
/**
 * @brief This method returns the residual norm
 * @return The two-norm of the RHS vector, or 0.0 when the system is empty
 */
double GetResidualNorm() override
{
    return TSparseSpace::Size(*mpb) != 0 ? TSparseSpace::TwoNorm(*mpb) : 0.0;
}
/**
 * @brief Function to perform expensive checks.
 * @details It is designed to be called ONCE to verify that the input is correct.
 * Delegates to the base strategy, the builder and solver, and the scheme.
 * @return 0 if everything is fine (the sub-checks raise on failure)
 */
int Check() override
{
KRATOS_TRY
BaseType::Check();
GetBuilderAndSolver()->Check(BaseType::GetModelPart());
GetScheme()->Check(BaseType::GetModelPart());
return 0;
KRATOS_CATCH("")
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @details The defaults of this class are merged recursively with the base
 * class defaults, so the returned object covers both levels.
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
    "name" : "linear_strategy",
    "compute_norm_dx" : false,
    "reform_dofs_at_each_step" : false,
    "compute_reactions" : false,
    "builder_and_solver_settings" : {},
    "linear_solver_settings" : {},
    "scheme_settings" : {}
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The registered settings name of this strategy
 */
static std::string Name()
{
    const std::string registered_name = "linear_strategy";
    return registered_name;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string (class name only, no state).
std::string Info() const override
{
return "ResidualBasedLinearStrategy";
}
/// Print information about this object (just the class name).
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data (currently identical to PrintInfo; no state is printed).
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief This method assigns settings to member variables
 * @details Base settings are assigned first; creating a scheme or a builder
 * and solver from the "name" entry of their settings blocks is not yet
 * implemented and raises an error.
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
mCalculateNormDxFlag = ThisParameters["compute_norm_dx"].GetBool();
mReformDofSetAtEachStep = ThisParameters["reform_dofs_at_each_step"].GetBool();
mCalculateReactionsFlag = ThisParameters["compute_reactions"].GetBool();
// Saving the scheme
if (ThisParameters["scheme_settings"].Has("name")) {
KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
}
// Setting up the default builder and solver
if (ThisParameters["builder_and_solver_settings"].Has("name")) {
KRATOS_ERROR << "IMPLEMENTATION PENDING IN CONSTRUCTOR WITH PARAMETERS" << std::endl;
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename TSchemeType::Pointer mpScheme = nullptr; /// The pointer to the time scheme considered
typename TBuilderAndSolverType::Pointer mpBuilderAndSolver = nullptr; /// The pointer to the builder and solver employed
TSystemVectorPointerType mpDx; /// The increment in the solution (correction vector)
TSystemVectorPointerType mpb; /// The RHS vector of the system of equations
TSystemMatrixPointerType mpA; /// The LHS matrix of the system of equations
/**
 * @brief Flag telling if it is needed to reform the DofSet at each
 solution step or if it is possible to form it just once
 * @details Default = false
    - true  : Reform at each time step
    - false : Form just once (more efficient)
 */
bool mReformDofSetAtEachStep;
bool mCalculateNormDxFlag; /// Calculates if required the norm of the correction term Dx
/**
 * @brief Flag telling if it is needed or not to compute the reactions
 * @details default = true
 */
bool mCalculateReactionsFlag;
bool mSolutionStepIsInitialized; /// Flag to set as initialized the solution step
bool mInitializeWasPerformed; /// Flag to set as initialized the strategy
///@}
///@name Private Operators*/
///@{
/**
 * @brief This method prints the components of the system of equations depending on the echo level
 * @details Echo level 3 prints the LHS matrix, the solution and the RHS to
 * the log; echo level 4 dumps the LHS and RHS to matrix-market files stamped
 * with the current TIME from the process info.
 */
virtual void EchoInfo()
{
    TSystemMatrixType& rA = *mpA;
    TSystemVectorType& rDx = *mpDx;
    TSystemVectorType& rb = *mpb;
    // Fetch the echo level once (the original mixed BaseType:: and this->
    // qualified calls for the same value)
    const int echo_level = BaseType::GetEchoLevel();
    if (echo_level == 3) // if it is needed to print the debug info
    {
        KRATOS_INFO("LHS") << "SystemMatrix = " << rA << std::endl;
        KRATOS_INFO("Dx") << "Solution obtained = " << rDx << std::endl;
        KRATOS_INFO("RHS") << "RHS = " << rb << std::endl;
    }
    if (echo_level == 4) // print to matrix market file
    {
        std::stringstream matrix_market_name;
        matrix_market_name << "A_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm";
        TSparseSpace::WriteMatrixMarketMatrix((char*) (matrix_market_name.str()).c_str(), rA, false);
        std::stringstream matrix_market_vectname;
        matrix_market_vectname << "b_" << BaseType::GetModelPart().GetProcessInfo()[TIME] << ".mm.rhs";
        TSparseSpace::WriteMatrixMarketVector((char*) (matrix_market_vectname.str()).c_str(), rb);
    }
}
///@}
///@name Private Operations*/
///@{
///@}
///@name Private Access */
///@{
///@}
///@name Private Inquiry */
///@{
///@}
///@name Un accessible methods */
///@{
/** Copy constructor.
 * Declared but intentionally not defined to make the strategy non-copyable.
*/
ResidualBasedLinearStrategy(const ResidualBasedLinearStrategy& Other);
///@}
}; /* Class ResidualBasedLinearStrategy */
///@}
///@name Type Definitions */
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUALBASED_LINEAR_STRATEGY defined */
|
Searching.202006191549.nested_parallel.h | //
// Created by Zhen Peng on 6/19/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0; // Number of vertices in the graph
edgei num_e_ = 0; // Number of edges in the graph
idi num_queries_ = 0; // Number of query vectors loaded
uint64_t dimension_ = 0; // Dimensionality of data/query vectors
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr; // Base vectors; malloc-owned, freed in the destructor
dataf *queries_load_ = nullptr; // Query vectors; malloc-owned, freed in the destructor
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr; // Packed graph layout (data + neighbor lists); malloc-owned
uint64_t data_bytes_; // Bytes per vertex used for vector data in opt_nsg_graph_
uint64_t neighbor_bytes_; // Bytes per vertex used for the neighbor list in opt_nsg_graph_
uint64_t vertex_bytes_; // Total bytes per vertex record (data + neighbors)
// For multithreads
int num_threads_ = 1; // Total number of threads
// int num_real_threads_ = 1;
int num_threads_intra_ = 1; // Threads working inside one query (nested level) — presumably; confirm against usage
int num_threads_inter_ = 1; // Threads working across queries (outer level) — presumably; confirm against usage
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
const dataf vertex_norm) const;
// idi dimension)
static idi insert_into_queue(
std::vector<Candidate> &c_queue,
idi c_queue_top,
Candidate cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
// idi insert_into_queue_nsg(
// std::vector< Candidate > &c_queue,
// idi c_queue_top,
// Candidate cand);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static void merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_list(
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
std::vector<Candidate> &set_L,
const idi L);
// idi merge_all_queues_para_array(
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends,
// const idi local_queue_length,
// std::vector<Candidate> &set_L,
// const idi L);
idi merge_all_queues_para_array(
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_queue_base(
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &set_L,
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi queue_base,
const int real_threads,
const idi local_queue_length,
// std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_all_together_in_sequential(
std::vector<Candidate> &set_L,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
const idi L);
idi min_all_queues_at_heads(
const std::vector<Candidate> &set_L,
std::vector<idi> &queue_heads,
const std::vector<idi> &local_queues_ends,
const idi local_queue_length,
const idi L);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
// uint64_t count_single_query_computation_ = 0;
// distf dist_min_ = 0;
// distf dist_max_ = 0;
double time_merge_ = 0;
// double time_initialization_ = 0;
// double time_sequential_phase_ = 0;
// double time_parallel_phase_ = 0;
// double time_insert_ = 0;
// double time_compare_minimum_ = 0;
// L3CacheMissRate profile_miss_rate;
// uint64_t number_local_elements_ = 0;
// Destructor: releases the three malloc-owned buffers. Pointers are nulled
// after free as a defense against accidental double-free/use-after-free.
~Searching()
{
free(data_load_);
data_load_ = nullptr;
// free(queries_load_);
// _mm_free(data_load_);
free(queries_load_);
queries_load_ = nullptr;
// free(norms_);
// free(nsg_graph_indices_);
// free(nsg_graph_out_edges_);
free(opt_nsg_graph_);
opt_nsg_graph_ = nullptr;
}
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
const unsigned L) const;
// void prepare_candidate_queue_list(
// const float *query_load,
// std::vector<std::vector<efanna2e::Neighbor> > &retset_list,
// std::vector<boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<unsigned> &init_ids,
// const boost::dynamic_bitset<> &flags,
// unsigned batch_start,
// unsigned batch_size,
// unsigned L);
// void search_in_batch(
//// const float *query_load,
// size_t K,
// size_t L,
// unsigned batch_start,
// unsigned batch_size,
// std::vector< std::vector<Candidate> > &set_L_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<idi> &init_ids,
// const boost::dynamic_bitset<> &is_visited,
// std::vector<std::vector<idi> > &set_K_list);
void search_in_sequential(
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
// boost::dynamic_bitset<> &is_visited,
// boost::dynamic_bitset<> is_visited,
// std::vector<idi> &init_ids,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// void search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// idi get_out_degree(idi v_id) const
// {
// if (v_id < num_v_ - 1) {
// return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id];
// } else {
// return num_e_ - nsg_graph_indices_[v_id];
// }
// }
void search_with_top_m(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector< std::vector<idi> > &top_m_list);
void search_with_top_m_scale_m(
const PANNS::idi value_M_max,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
// void search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids);
// void search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited);
void search_with_top_m_in_batch(
PANNS::idi M,
PANNS::idi batch_start,
PANNS::idi batch_size,
PANNS::idi K,
PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list);
// void para_search_with_top_m_critical_area(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_no_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_critical_area_yes_omp(
// idi M,
// idi query_id,
// idi K,
// idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited);
// void para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K);
// void para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_in_array(
// void para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
// void para_search_with_top_m_merge_queues_by_sort(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &dest_offsets,
// const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L.
// BitVector &is_visited);
void para_search_with_top_m_merge_queues_better_merge_v0(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// BitVector &is_visited);
void para_search_with_top_m_merge_queues_better_merge_v2(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited,
std::vector<distf> &local_thresholds);
// BitVector &is_visited)
void para_search_with_top_m_merge_queues_better_merge_v1(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// BitVector &is_visited);
void para_search_with_top_m_merge_queues_better_merge_v0_0(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
// BitVector &is_visited)
void para_search_with_top_m_merge_queues_less_merge(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited,
std::vector<distf> &local_thresholds);
// BitVector &is_visited)
void para_search_with_top_m_merge_queues_no_merge(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited,
std::vector<distf> &local_thresholds,
const uint64_t computation_threshold);
void para_search_with_top_m_merge_queues_scale_m_v0(
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
// std::vector<distf> &local_thresholds);
// BitVector &is_visited)
void para_search_with_top_m_merge_queues_scale_m_v2(
const idi value_M_min,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_scale_m_v3(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<Candidate> &top_m_candidates,
std::vector<idi> &top_m_candidates,
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_middle_m_no_merge(
const uint64_t computation_threshold,
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
const idi init_size,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_sequential_merge(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_nested_para(
const idi batch_start,
const idi batch_size,
const idi value_M_middle,
const idi value_M_max,
const idi K,
const idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list);
// void para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_distance_threshold_m(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
// void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration(
//// const idi value_M_middle,
//// const idi value_M_max,
// const distf relative_dist_threshold,
// const idi middle_iteration,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_myths(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
//// std::vector<uint8_t> &is_visited);
//// boost::dynamic_bitset<> &is_visited);
//// void para_prepare_init_ids(
//// std::vector<unsigned> &init_ids,
//// unsigned L) const;
// void para_search_with_top_m_in_batch_embarassing_para(
// const PANNS::idi M,
// const PANNS::idi batch_start,
// const PANNS::idi batch_size,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector< std::vector<Candidate> > &set_L_list,
// const std::vector<idi> &init_ids,
// std::vector< std::vector<idi> > &set_K_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list);
void test_neighbors_distance_to_father(
const idi num_selected) const;
void test_neighbors_normalized_distance_to_father(
const idi num_selected) const;
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
 * Read the base (data) vectors from the file into data_load_,
 * also setting num_v_ and dimension_.
 * If queries were loaded first, verify both files agree on the
 * vector dimensionality; abort on mismatch.
 * @param filename path of the base-vector file
 */
inline void Searching::load_data_load(char *filename)
{
    const auto previous_dimension = dimension_;
    DiskIO::load_data(filename, data_load_, num_v_, dimension_);
    // A non-zero previous dimension means the query file was loaded
    // before this one; the two must match.
    if (previous_dimension != 0 && previous_dimension != dimension_) {
        std::cerr << "Error: data dimension " << dimension_
                  << " is not equal to query dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
 * Read the query vectors from the file into queries_load_,
 * also setting num_queries_ and dimension_.
 * If base data was loaded first, verify both files agree on the
 * vector dimensionality; abort on mismatch.
 * @param filename path of the query-vector file
 */
inline void Searching::load_queries_load(char *filename)
{
    const auto previous_dimension = dimension_;
    DiskIO::load_data(filename, queries_load_, num_queries_, dimension_);
    // A non-zero previous dimension means the data file was loaded
    // before this one; the two must match.
    if (previous_dimension != 0 && previous_dimension != dimension_) {
        std::cerr << "Error: query dimension " << dimension_
                  << " is not equal to data dimension " << previous_dimension << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
 * Input the NSG graph from the file and build the cache-friendly
 * "optimized" layout opt_nsg_graph_: every vertex occupies one contiguous
 * block of vertex_bytes_ = data_bytes_ + neighbor_bytes_, laid out as
 *   [squared norm | vector data | out-degree | neighbor IDs].
 * On success data_load_ is freed, since its contents were copied into
 * opt_nsg_graph_.
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * @param filename path of the binary NSG file: width, entry point, then
 *        per vertex a degree followed by that many neighbor IDs
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
        exit(EXIT_FAILURE);
    }
    // Header: maximum out-degree, then the search entry-point vertex.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
    // Per-vertex byte sizes of the packed layout.
    data_bytes_ = (1 + dimension_) * sizeof(dataf); // norm + vector
    neighbor_bytes_ = (1 + width_) * sizeof(idi); // degree + up to width_ neighbors
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
        exit(EXIT_FAILURE);
    }
    idi v_id = 0;
    num_e_ = 0; // total edge count, accumulated from the per-vertex degrees
    char *base_location = opt_nsg_graph_;
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        // eof is only detectable after a failed read, so test here.
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
//        std::vector<idi> tmp_ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));
        // Norm and data
        distf norm = compute_norm(data_load_ + v_id * dimension_);
//        distf norm = compute_norm(v_id);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;
        // Neighbors
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
//        memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
        base_location += neighbor_bytes_;
        ++v_id;
    }
    // The graph file must describe exactly the vertices of the base data.
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
        exit(EXIT_FAILURE);
    }
    // Raw vectors now live inside opt_nsg_graph_; release the originals.
    free(data_load_);
    data_load_ = nullptr;
//    ////////////////////////
//    idi v_id = 0;
//    num_e_ = 0;
//    while (true) {
//        idi degree;
//        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
//        if (fin.eof()) {
//            break;
//        }
//        num_e_ += degree;
//
//        std::vector<idi> ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
////        nsg_graph_.push_back(ngbrs);
////        tmp_edge_list.push_back(ngbrs);
//        edge_list_.push_back(ngbrs);
//        ++v_id;
//    }
//    if (v_id != num_v_) {
//        std::cerr << "Error: NSG data has " << v_id
//                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
//        exit(EXIT_FAILURE);
//    }
}
/**
 * Load those true top-K neighbors (ground truth) of queries.
 * Binary format: a header of (query count, K), then for each query K
 * records of (neighbor id, distance); the distances are read but
 * discarded — only the IDs are kept.
 * @param filename
 * @param[out] true_nn_list resized to t_query_num rows of t_K IDs each
 */
inline void Searching::load_true_NN(
        const char *filename,
        std::vector< std::vector<idi> > &true_nn_list)
//        unsigned &t_K)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        fprintf(stderr, "Error: cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    idi t_query_num; // number of queries recorded in the file
    idi t_K; // number of true neighbors per query in the file
//    unsigned t_K;
    fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
    fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
//    if (t_query_num != query_num) {
//        fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
//                query_num, t_query_num, filename);
//        exit(EXIT_FAILURE);
//    }
    // The file must cover at least the loaded queries ...
    if (t_query_num < num_queries_) {
        fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
        exit(EXIT_FAILURE);
    }
    // ... and at least 100 neighbors, which recall computation relies on.
    if (t_K < 100) {
        fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
        exit(EXIT_FAILURE);
    }
//    data = new unsigned[(size_t) t_query_num * (size_t) t_K];
    true_nn_list.resize(t_query_num);
    for (idi q_i = 0; q_i < t_query_num; ++q_i) {
        true_nn_list[q_i].resize(t_K);
    }
    for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
//        size_t offset = q_i * t_K;
        for (unsigned n_i = 0; n_i < t_K; ++n_i) {
            unsigned id;
            float dist;
            fin.read(reinterpret_cast<char *>(&id), sizeof(id));
            fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // distance is discarded
//            data[offset + n_i] = id;
            true_nn_list[q_i][n_i] = id;
        }
    }
    fin.close();
}
/**
 * Compute recall@{1,5,10,20,50,100} of the search results against the
 * ground truth, averaged over all queries.
 * For every query, each of the top-100 true neighbors found within the
 * first t result positions contributes one hit to recall@t; each counter
 * is then normalized by (t * num_queries_).
 * @param true_nn_list ground-truth neighbor IDs per query (>= 100 each)
 * @param set_K_list search results per query (first 100 entries are used)
 * @param[out] recalls map from cutoff t to the averaged recall@t
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    // Recall is measured against the top-100 ground-truth neighbors.
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    static const unsigned cutoffs[] = {1, 5, 10, 20, 50, 100};
    for (unsigned t : cutoffs) {
        recalls[t] = 0.0;
    }
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            const unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] != true_id) {
                    continue;
                }
                // Credit every cutoff the hit position falls under.
                for (unsigned t : cutoffs) {
                    if (n_i < t) {
                        recalls[t] += 1;
                    }
                }
            }
        }
    }
    // Normalize each counter by (cutoff * number of queries).
    for (unsigned t : cutoffs) {
        recalls[t] /= 1.0 * t * num_queries_;
    }
}
/**
 * Sequential (single-thread) best-first graph search for one query.
 * Maintains a sorted candidate queue set_L of length L. Starting from
 * init_ids, it repeatedly expands the first unchecked candidate: all of
 * that vertex's unvisited neighbors closer than the current L-th best are
 * inserted into the queue. The loop ends when every candidate in the
 * top-L has been checked; the IDs of the best K are written to set_K.
 * @param query_id index of the query inside queries_load_
 * @param K number of result IDs to report (K <= L)
 * @param L queue length; larger L trades speed for recall
 * @param set_L pre-allocated candidate queue, capacity >= L
 * @param init_ids the L starting vertex IDs (see prepare_init_ids())
 * @param[out] set_K receives the IDs of the K closest vertices found
 */
inline void Searching::search_in_sequential(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    {//test
//        printf("Iteration: Relative_Distance:\n");
////        printf("Iteration: Relative_Distance:\n");
////        printf("----query: %u----\n", query_id);
//    }
    // One bit per vertex: set when the vertex has entered the queue once,
    // so it is never evaluated twice.
    boost::dynamic_bitset<> is_visited(num_v_);
    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the packed vertex blocks of the initial candidates.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // First dataf of a vertex block is its precomputed squared norm.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    idi tmp_count = 0; // for debug
//    {// Print relative distance
////        distf top_dist = set_L[0].distance_;
//        for (idi i_l = 0; i_l < L; ++i_l) {
//            printf("%u %f\n",
//                   tmp_count, set_L[i_l].distance_);
////                   tmp_count, set_L[i_l].distance_ - top_dist);
//        }
//    }
    while (k < L) {
        Candidate &top_cand = set_L[k];
        // nk tracks the lowest queue position any new insert landed at
        // during this expansion; L means "nothing inserted".
        unsigned nk = L;
        if (!top_cand.is_checked_) {
            ++tmp_count;
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // The neighbor list follows the data section of the vertex
            // block: first the out-degree, then the neighbor IDs.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Worse than the current L-th best: cannot enter the queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[0].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                           tmp_count, set_L[i_l].distance_);
////                           tmp_count, set_L[i_l].distance_ - top_dist);
//                }
//            }
        }
        // If a new candidate landed at or before position k, resume the
        // scan from there; otherwise advance past the checked candidate.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
//    cache_miss_kernel.measure_stop();
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
//inline void Searching::search_in_sequential_BitVector(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
// BitVector is_visited(num_v_);
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
//// is_visited[init_ids[v_i]] = true;
// is_visited.atomic_set_bit(init_ids[v_i]);
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//// cache_miss_kernel.measure_stop();
//#pragma omp parallel for
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/**
 * Prepare init_ids and flags, as they are constant for all queries.
 * Fills init_ids with exactly L distinct vertex IDs: first the (deduped)
 * out-neighbors of the entry point ep_; if those are fewer than L, the
 * remainder is padded with sequential IDs starting at ep_ + 1, wrapping
 * modulo num_v_ — a deterministic stand-in for random picks.
 * @param[out] init_ids pre-sized to hold at least L IDs
 * @param L
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        const unsigned L) const
{
//    idi num_ngbrs = get_out_degree(ep_);
//    edgei edge_start = nsg_graph_indices_[ep_];
//    // Store ep_'s neighbors as candidates
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//        init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//    }
//    std::unordered_set<idi> visited_ids;
    // One bit per vertex, used to keep init_ids free of duplicates.
    boost::dynamic_bitset<> is_selected(num_v_);
    // Neighbor list of ep_ inside the packed vertex block: degree first,
    // then the neighbor IDs.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//        idi v_id = out_edges[tmp_l];
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
//        init_ids[tmp_l] = v_id;
        init_ids[init_ids_end++] = v_id;
//        init_ids[tmp_l] = out_edges[tmp_l];
//        visited_ids.insert(init_ids[tmp_l]);
    }
//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }
    // If ep_'s neighbors are not enough, add other random vertices
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
    while (init_ids_end < L) {
        tmp_id %= num_v_; // wrap around so every vertex is a candidate
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
//        if (visited_ids.find(id) != visited_ids.end()) {
//            continue;
//        }
        is_selected[v_id] = true;
//        visited_ids.insert(id);
        init_ids[init_ids_end++] = v_id;
//        tmp_l++;
    }
}
// TODO: re-code in AVX-512
/**
 * Squared L2 norm of a vector, computed with AVX2 (8-float lanes).
 * The dimension is rounded up to the next multiple of 8 (D); the main
 * loop consumes 16 floats per iteration (DD of them) and any leftover
 * 8-float chunk (DR) is folded in before the loop.
 * NOTE(review): when dimension_ is not a multiple of 8, the DR chunk
 * reads up to 7 floats past the logical end of @p data — assumes the
 * underlying buffer is padded/readable that far; confirm with the
 * allocation sites.
 * @param data pointer to the vector's floats (unaligned loads are used)
 * @return sum of squares over the (padded) elements
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
// Unaligned-load variant: accumulates addr[0..7]^2 into dest.
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);
    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension rounded up to a multiple of 8
    unsigned DR = D % 16; // leftover 8-float chunk (0 or 8)
    unsigned DD = D - DR; // portion handled 16 floats at a time
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    if (DR) { AVX_L2NORM(e_l, sum, l0); } // tail chunk first
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    // Horizontal reduction of the 8 accumulator lanes.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    return result;
}
/**
 * AVX2 "distance" between a stored vertex and a query:
 * returns -2 * dot(v, q) + ||v||^2. Compared with the true squared
 * Euclidean distance ||v - q||^2 = ||v||^2 - 2*dot(v, q) + ||q||^2,
 * the query's squared norm is omitted — it is constant per query, so
 * candidate ordering by this value matches ordering by true distance.
 * Same padded-to-8 loop structure (and the same potential over-read past
 * dimension_, see compute_norm) applies to both input pointers.
 * @param v_data vertex floats (immediately after the stored norm)
 * @param q_data query floats
 * @param vertex_norm the vertex's precomputed squared norm
 * @return -2*dot(v_data, q_data) + vertex_norm
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
// Unaligned-load variant: accumulates addr1[i]*addr2[i] into dest.
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension rounded up to a multiple of 8
    unsigned DR = D % 16; // leftover 8-float chunk (0 or 8)
    unsigned DD = D - DR; // portion handled 16 floats at a time
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } // tail chunk first
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    // Horizontal reduction of the 8 accumulator lanes.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    result = -2 * result + vertex_norm; // drop the constant ||q||^2 term
    return result;
}
//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue(
// std::vector<PANNS::Candidate> &queue,
// idi &queue_top,
// const idi queue_size,
// const PANNS::Candidate &cand)
//{
// assert(queue_size > 1);
// if (0 == queue_top) {
// queue[queue_top++] = cand;
// return 0;
// } else if (1 == queue_top) {
// if (queue[0] < cand) {
// queue[queue_top++] = cand;
// return 1;
// } else {
// queue[++queue_top] = queue[0];
// queue[0] = cand;
// return 0;
// }
// }
//
// if (queue[queue_top - 1] < cand) {
// if (queue_top < queue_size) {
// queue[queue_top++] = cand;
// }
// return queue_top;
// }
//
// idi r = insert_into_queue(
// queue,
// queue_top - 1,
// cand);
//// {//test
//// printf("r: %u"
//// "queue_top: %u "
//// "queue_size: %u\n",
//// r,
//// queue_top,
//// queue_size);
//// }
// return r;
//
//// /////////////////////////////////////////////////////////////
//// // Find the insert location
//// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
//// idi insert_loc = it_loc - queue.begin();
//// if (insert_loc == queue_size) {
//// return queue_size;
//// }
////
//// // Insert
////// if (queue_top == queue_size) {
////// // If full already
////// --queue_top;
////// }
//// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//// reinterpret_cast<char *>(queue.data() + insert_loc),
//// (queue_top - insert_loc) * sizeof(Candidate));
////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
////// queue.at(q_i) = queue.at(q_i - 1);
////// }
//// queue[insert_loc] = cand;
//// ++queue_top;
//// return insert_loc;
//}
/**
 * Insert cand into the sorted queue[0 .. queue_top), keeping it sorted.
 * Unlike insert_into_queue, a successful insertion grows queue_top by one
 * (when the queue is already full, the last element is dropped instead).
 * No duplicate check is performed in this overload.
 *
 * @return the insertion index, or queue_size if cand falls past the end of a
 *         full queue and is rejected.
 */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    if (queue_top == 0) {
        // Empty queue: cand becomes the only element.
        queue[queue_top++] = cand;
        return 0;
    }
    // Locate the insertion point among the current elements.
    const auto first = queue.begin();
    const idi insert_loc = static_cast<idi>(
            std::lower_bound(first, first + queue_top, cand) - first);
    if (insert_loc == queue_size) {
        // cand is worse than everything in a full queue: reject it.
        return queue_size;
    }
    if (queue_top == queue_size) {
        // Full: make room by discarding the tail element.
        --queue_top;
    }
    // Shift [insert_loc, queue_top) one slot to the right, then place cand.
    std::copy_backward(first + insert_loc, first + queue_top, first + queue_top + 1);
    queue[insert_loc] = cand;
    ++queue_top;
    return insert_loc;
}
/**
 * Insert cand into the sorted sub-queue queue[queue_start .. queue_start+queue_top).
 * The difference from insert_into_queue is that add_into_queue will increase
 * the queue size (queue_top) by 1 on success; when the sub-queue is full the
 * last element is dropped to make room.
 *
 * @param queue       backing array that may hold several concatenated queues
 * @param queue_start offset of this sub-queue inside queue
 * @param queue_top   number of elements currently in the sub-queue (updated)
 * @param queue_size  maximum capacity of the sub-queue
 * @param cand        candidate to insert
 * @return insertion index relative to queue_start, or queue_size when cand is
 *         not inserted (falls past the end of a full queue, or duplicates the
 *         element already at the insertion point).
 *
 * NOTE(review): when the queue is full and cand turns out to duplicate
 * it_loc, queue_top has already been decremented and is not restored, so the
 * queue silently loses its last element -- verify this is intended.
 */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_top, // The insertion location starting from queue_start
        const idi queue_size, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    if (0 == queue_top) {
        // Empty sub-queue: cand becomes the first element.
        queue[queue_start + queue_top++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_top;
    // Find the insert location
    auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
//    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
    idi insert_loc = it_loc - queue.begin(); // Absolute index into queue.

    if (queue_top < queue_size) {
        // Queue is not full
        if (insert_loc == queue_end) {
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_top;
            return queue_top - 1;
        }
    } else {
        // Queue is full
        if (insert_loc == queue_end) {
            // cand is worse than everything in a full queue: reject.
            return queue_size;
        }
        // Drop the tail element to make room.
        --queue_top;
        --queue_end;
    }
    if (cand.id_ == it_loc->id_) {
        // Duplicate
        return queue_size;
    }
    // Add into queue
    // Shift [insert_loc, queue_end) one slot right (Candidate is assumed
    // trivially copyable for the raw memmove).
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_top;
    return insert_loc - queue_start;

//    ////////////////
//    if (insert_loc == queue_size + queue_start) {
//        return queue_size;
//    }
//
//    if (cand.id_ == it_loc->id_) {
//        // Duplicate
//        return queue_size;
//    }
//
//    // Insert
//    if (queue_top == queue_size) {
//        // If full already
//        --queue_top;
//        --queue_end;
//    }
//    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//            reinterpret_cast<char *>(queue.data() + insert_loc),
//            (queue_end - insert_loc) * sizeof(Candidate));
//    queue[insert_loc] = cand;
//    ++queue_top;
//    return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
 * The queue is sorted ascending by distance; ties on distance are broken by
 * vertex ID (smaller ID first).
 *
 * NOTE(review): when cand is worse than every element, this writes to
 * c_queue[c_queue_top] -- one slot past the current top. The caller must
 * guarantee capacity of at least c_queue_top + 1 (set_L appears to be sized
 * L + 1 in NSG-style code); verify at the call sites.
 *
 * @param[out] c_queue     the sorted candidate queue
 * @param c_queue_top      number of valid elements in c_queue
 * @param cand             candidate to insert
 * @return the index where cand was placed, or c_queue_top if it was not
 *         placed before the current end.
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) {
        // If the first: shift everything right and put cand at the head.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // If the last element has exactly cand's distance, resolve by ID.
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            return c_queue_top;
        }
    }
    // Binary search: left ends at the first element with a larger distance.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }
    // If the distance is the same
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ; // No tie: left is already the final position.
    } else {
        // Walk left across equal-distance elements whose IDs are larger.
        while (0 != left
                && c_queue[left - 1].distance_ == cand.distance_
                && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }
    // Insert to left
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}
//inline void Searching::cand_pushes_ngbrs_into_queue(
// idi cand_id,
// const dataf *query_data,
// idi L,
// idi &new_k,
// boost::dynamic_bitset<> &is_visited,
// std::vector<Candidate> &set_L)
//{
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
//}
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
// Deprecated: cannot use std::set, because its element is constant.
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
//// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// std::set<Candidate> set_L;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// set_L.emplace(v_id, dist, false);
// }
//// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
//// Candidate &top_cand = set_L[k];
// std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k);
// unsigned nk = L;
// if (!top_cand->is_checked_) {
// top_cand->is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/* Function:
 * Merge sorted queue2 into sorted queue1 whose size is fixed at queue1_size:
 * inserted elements push queue1's tail out. Both queues must be non-empty.
 * Returns the insertion location (relative to queue1_start) of queue2's first
 * element -- the lowest position in queue1 that may have changed -- or
 * queue1_size if queue2's head falls past queue1's end.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // queue2's head is worse than all of queue1: nothing can be merged.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot changes.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }

    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }

    // Merge the rest of queue2, scanning queue1 from the insertion point.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // One of the queues is exhausted; the rest of queue1 is already in place.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            // queue1's element stays where it is.
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
 * Merge sorted queue2 into sorted queue1, growing queue1: queue1_size is
 * updated by the merge, bounded above by queue1_length (which must be
 * provided). Both queues must be non-empty.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // All of queue2 goes after queue1: bulk-append as much as fits.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                            queue1_length - queue1_size :
                            queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return;
    }

    // Merge the rest of queue2, scanning queue1 from the insertion point.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: bulk-copy queue2's remainder into the free tail.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
}
/**
 * Merge every thread's local queue into the global queue set_L.
 * Phase 1: parallel pairwise (tree-style) merge over the largest
 * power-of-two prefix of the local queues. Phase 2: sequential
 * prefix-sum-like merge for the remaining queues. Phase 3: merge the last
 * local queue into the fixed-size global queue set_L.
 *
 * @param local_queues_list one sorted queue per thread
 * @param local_queues_ends element counts of the local queues (reset to 0 on return)
 * @param set_L             the global queue, fixed size L
 * @param L                 length of the global queue
 * @return the lowest position in set_L that changed (L if unchanged)
 */
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // Merge queue bi into queue ai. An empty bi is a no-op; an empty ai
    // simply steals bi's storage. Otherwise do a full sorted merge and cap
    // the result at L elements (padding shorter results up to L so every
    // queue keeps capacity L) -- identical to the previously duplicated
    // bodies of the two loops below.
    auto merge_pair = [&](idi ai, idi bi) {
        if (0 == local_queues_ends[bi]) {
            return;
        }
        if (local_queues_ends[ai] == 0) {
            local_queues_list[ai].swap(local_queues_list[bi]);
            std::swap(local_queues_ends[ai], local_queues_ends[bi]);
            return;
        }
        idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
        std::vector<Candidate> tmp_queue(tmp_length);
        std::merge(
                local_queues_list[ai].begin(),
                local_queues_list[ai].begin() + local_queues_ends[ai],
                local_queues_list[bi].begin(),
                local_queues_list[bi].begin() + local_queues_ends[bi],
                tmp_queue.begin());
        if (tmp_length > L) {
            tmp_queue.resize(L);
            tmp_length = L;
        } else if (tmp_length < L) {
            tmp_queue.resize(L);
        }
        local_queues_list[ai].swap(tmp_queue);
        local_queues_ends[ai] = tmp_length;
    };

    // Phase 1: tree merge over the power-of-two prefix.
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            merge_pair(ai, bi);
        }
    }
    // Phase 2: remaining queues, prefix-sum-like merge.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            merge_pair(i, i - 1);
        }
    }
    // Phase 3: merge the last local queue into the global queue set_L.
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}
/* Function:
 * Use large local_queues_array (set_L) as a concatenation of all queues:
 * queue q occupies set_L[q * local_queue_length ...], and the last queue is
 * the fixed-size global queue. Tree-merge the power-of-two prefix in
 * parallel, then prefix-sum-merge the remainder sequentially.
 * Returns the lowest position at which the global queue changed (L if it
 * did not change).
 */
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const idi num_queues = num_threads_intra_;
    idi nk = L;
    // Merge sub-queue bi into sub-queue ai (both live inside set_L). The last
    // queue is the global queue of fixed size L; merging into it also tracks
    // the lowest insertion position in nk. This replaces the previously
    // duplicated bodies of the two loops below.
    auto merge_pair = [&](idi ai, idi bi) {
        idi a_start = ai * local_queue_length;
        idi b_start = bi * local_queue_length;
        if (0 == local_queues_ends[bi]) {
            return;
        }
        if (local_queues_ends[ai] == 0) {
            std::copy(set_L.begin() + b_start,
                    set_L.begin() + b_start + local_queues_ends[bi],
                    set_L.begin() + a_start); // Copy bi to ai
            local_queues_ends[ai] = local_queues_ends[bi];
            local_queues_ends[bi] = 0;
            return;
        }
        if (ai != static_cast<idi>(num_queues - 1)) {
            // ai is a local queue: growing merge.
            merge_two_queues_into_1st_queue_seq_incr(
                    set_L,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    set_L,
                    b_start,
                    local_queues_ends[bi]);
        } else {
            // ai is the global queue: fixed-size merge; track lowest change.
            idi r = merge_two_queues_into_1st_queue_seq_fixed(
                    set_L,
                    a_start,
                    L,
                    set_L,
                    b_start,
                    local_queues_ends[bi]);
            if (r < nk) {
                nk = r;
            }
        }
    };

    // Tree merge over the power-of-two prefix of the queues.
    int size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for num_threads(num_threads_intra_)
        for (int i = 0; i < size; i += by) {
            // At most one pair per level targets the global queue, so nk has
            // a single writer within each parallel region.
            merge_pair(i + (1 << (d + 1)) - 1, i + (1 << d) - 1);
        }
    }
    // Remaining queues beyond the power-of-two prefix: prefix-sum-like merge.
    if (size != num_queues) {
        for (int i = size; i < num_queues; ++i) {
            merge_pair(i, i - 1);
        }
    }
    // Reset local_queues_ends
    // Not do this for Collector Idea or Selecting Idea
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
    return nk;
}
/* Function:
 * When merge all queues (in an array, and [num_threads_ - 1] is the global queue),
 * the starting local is at [queue_base]: only queues queue_base .. num_threads_ - 1
 * take part, using real_threads workers.
 * Returns the lowest position at which the global queue changed (L if it
 * did not change).
 */
inline idi Searching::merge_all_queues_queue_base(
//        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<Candidate> &set_L,
//        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi queue_base,
        const int real_threads,
        const idi local_queue_length,
//        std::vector<Candidate> &set_L,
        const idi L)
{
    idi nk = L; // Lowest insertion position into the global queue.
    // Tree merge over the power-of-two prefix of the participating queues.
    int size = 1 << (static_cast<idi>(log2(real_threads)));
//    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        idi by = 1 << (d + 1);
        idi i_bound = size + queue_base;
#pragma omp parallel for num_threads(real_threads)
        for (idi i = queue_base; i < i_bound; i += by) {
//        for (int i = 0; i < size; i += by) {
//            idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
//            idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                // Nothing to merge from bi.
                continue;
            }
            if (local_queues_ends[ai] == 0) {
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                // ai is a local queue: growing merge.
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                // ai is the global queue: fixed-size merge; track lowest change.
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Remain, prefix-sum-like merge
    if (size != real_threads) {
//    if (size != num_threads_) {
        for (int i = size + queue_base; i < num_threads_; ++i) {
//        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(set_L.begin() + b_start,
                        set_L.begin() + b_start + local_queues_ends[bi],
                        set_L.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            if (ai != static_cast<idi>(num_threads_ - 1)) {
                merge_two_queues_into_1st_queue_seq_incr(
                        set_L,
                        a_start,
                        local_queues_ends[ai],
                        local_queue_length,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
            } else {
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        a_start,
                        L,
                        set_L,
                        b_start,
                        local_queues_ends[bi]);
                if (r < nk) {
                    nk = r;
                }
            }
        }
    }
    // Reset local_queues_ends (all but the global queue's).
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
//    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return nk;
//    return r;
}
/* Function:
 * Merge all queues to the global queue, in a two-queue-merge way: a k-way
 * merge that repeatedly selects the minimum among all queue heads and pulls
 * it into the global queue (sequential).
 * Returns the lowest global-queue position that received a new element
 * (L if the global queue is unchanged).
 */
inline idi Searching::merge_all_queues_all_together_in_sequential(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const idi num_queues = num_threads_;
    const idi global_queue_base = (num_queues - 1) * local_queue_length;
    std::vector<idi> queue_heads(num_queues, 0); // Per-queue read cursors.
    idi queue_id_min;

//    bool is_finished = false;
    bool is_1st_selected = true;
    idi nk = L; // The highest location of insertion.
    {
        // Warm the cache with every non-empty queue's head.
        for (idi q_i = 0; q_i < num_queues; ++q_i) {
            if (0 == local_queues_ends[q_i]) {
                continue;
            }
            _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0);
        }
    }
    // Walk the global queue; whenever some local head is smaller than the
    // current global element, insert it here.
    while (queue_heads[num_queues - 1] < L) {
//        time_compare_minimum_ -= WallTimer::get_time_mark();
        queue_id_min = min_all_queues_at_heads(
                set_L,
                queue_heads,
                local_queues_ends,
                local_queue_length,
                L);
//        time_compare_minimum_ += WallTimer::get_time_mark();
        if (queue_id_min != num_queues - 1) { // Not in the global queue
//            time_insert_ -= WallTimer::get_time_mark();
            insert_one_element_at(
                    set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length],
                    set_L,
                    queue_heads[num_queues - 1],
                    global_queue_base,
                    L);
//            time_insert_ += WallTimer::get_time_mark();
            if (is_1st_selected) { // Get the highest inserting location
                is_1st_selected = false;
                nk = queue_heads[num_queues - 1];
            }
            ++queue_heads[queue_id_min];
        }
        ++queue_heads[num_queues - 1];
    }
    // Reset local_queues_ends (all but the global queue's).
    std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
    return nk;
}
/* Function:
 * Find the queue whose head element is the minimum across all queues.
 * The global queue (the last one) seeds the comparison; a local head that
 * duplicates the current minimum's vertex ID is skipped by advancing that
 * queue's cursor.
 */
inline idi Searching::min_all_queues_at_heads(
        const std::vector<Candidate> &set_L,
        std::vector<idi> &queue_heads,
        const std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const idi num_queues = num_threads_;
    // Seed with the global queue's head.
    idi best_queue = num_queues - 1;
    Candidate best = set_L[best_queue * local_queue_length + queue_heads[best_queue]];
    for (idi q_i = 0; q_i < num_queues - 1; ++q_i) {
        if (queue_heads[q_i] >= local_queues_ends[q_i]) {
            // This local queue is exhausted.
            continue;
        }
        const Candidate &head = set_L[q_i * local_queue_length + queue_heads[q_i]];
        if (head < best) {
            best = head;
            best_queue = q_i;
        } else if (head.id_ == best.id_) {
            // Same vertex as the current minimum: advance past the duplicate.
            ++queue_heads[q_i];
        }
    }
    return best_queue;
}
/**
 * Sequential top-M best-first search (NSG-style): each iteration expands up
 * to M unchecked candidates at once instead of one.
 *
 * @param M        number of candidates expanded per iteration
 * @param query_id index of the query vector in queries_load_
 * @param K        number of neighbors to report (K <= L)
 * @param L        search-queue length; larger L = higher recall, more work
 * @param set_L    candidate queue. NOTE(review): insert_into_queue can write
 *                 at index L, so set_L presumably has capacity L + 1 --
 *                 confirm at the caller.
 * @param init_ids the L starting vertex IDs
 * @param[out] set_K receives the K result IDs
 */
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    {
        // Mark all initial candidates as visited.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Prefetch the initial vertices' data.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First float of a vertex record is its norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        unsigned nk = L; // Lowest insertion location this iteration.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list: first idi is the out-degree, then the neighbor IDs.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    // Worse than the current worst: prune.
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the lowest changed position, or just past the last expansion.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
// Sequential top-M best-first search where the expansion width M starts at 1
// and is doubled after every iteration until it reaches value_M_max.
// Early iterations therefore behave like plain greedy best-first search
// (M = 1); later iterations expand many candidates per round like the
// fixed-M top-M search.
//
// Parameters:
//   value_M_max:      cap on the per-iteration expansion width M.
//   query_id:         index of the query vector inside queries_load_.
//   K:                number of result ids written to set_K (K <= L assumed —
//                     TODO confirm callers guarantee this; the final loop reads
//                     set_L[0..K-1] unconditionally).
//   L:                search width; only the first L slots of set_L are used,
//                     kept sorted ascending by distance.
//   set_L:            candidate queue (id, distance, is_checked_ flag).
//   init_ids:         L initial vertex ids used to seed the queue.
//   set_K:            output; receives the ids of the K closest candidates.
//   top_m_candidates: caller-provided scratch buffer; must hold at least
//                     value_M_max entries (written at indices < M <= value_M_max).
//   is_visited:       caller-provided bitmap over all vertices; used to avoid
//                     re-inserting vertices, and reset() before returning so it
//                     can be reused for the next query.
inline void Searching::search_with_top_m_scale_m(
        const PANNS::idi value_M_max,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    boost::dynamic_bitset<> is_visited(num_v_);

    {// Mark the initial candidates as visited so they are not inserted again.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Prefetch the initial candidates' vertex records before computing distances.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout in opt_nsg_graph_: a precomputed norm (dataf)
        // followed immediately by the vector data.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1; // Current expansion width; doubled each iteration (capped below).
    while (k < L) {
        ++tmp_count;

        unsigned nk = L; // Smallest (best) insertion position seen this iteration.

        // Select M candidates
        idi last_k = L; // Position of the lowest-ranked candidate selected.
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list layout at offset data_bytes_: out-degree (idi)
            // followed by out_degree neighbor ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            // Prefetch all neighbors' vertex records before the distance loop.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    // Farther than the current worst of the top-L: discard.
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        // If a new candidate was inserted above (or at) the last expanded
        // position, resume scanning from there; otherwise move past last_k.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        if (M < value_M_max) {
            M <<= 1; // Double the expansion width for the next iteration.
        }
    }

    // Export the ids of the K best candidates as the result.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }

    {// Reset
        is_visited.reset();
    }
}
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_to_get_distance_range(
// const PANNS::idi M,
// const PANNS::idi query_id,
//// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids)
//// std::vector<idi> &set_K)
//{
// dist_max_ = -FLT_MAX;
// dist_min_ = FLT_MAX;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
//// {// For distance range
//// if (dist > dist_max_) {
//// dist_max_ = dist;
//// }
//// if (dist < dist_min_) {
//// dist_min_ = dist;
//// }
//// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// For histogram
// for (idi i_l = 0; i_l < L; ++i_l) {
// distf dist = set_L[i_l].distance_;
// {// For distance range
// if (dist > dist_max_) {
// dist_max_ = dist;
// }
// if (dist < dist_min_) {
// dist_min_ = dist;
// }
// }
// }
// }
// }
//
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i].id_;
//// }
//}
//
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_myths_M(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
// const idi loc_range = L / 3;
//
//
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
//// {// For histogram
//// const distf dist_range = dist_max_ - dist_min_;
//// printf("iter:%u\n", 0);
//// for (idi i_l = 0; i_l < L; ++i_l) {
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
//// }
//// }
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// std::vector<idi> range_count(3, 0);
// idi zero_inserted_count = 0;
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// }
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//// {//test
//// printf("top_m_candidates_ends: %u\n", top_m_candidates_end);
//// }
// {
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//
// uint64_t count_neighbors = 0;
// uint64_t count_inserted = 0;
// std::vector<idi> locs_to_count(M);
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
//
// count_neighbors += out_degree;
// idi num_inserted = 0;
//
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// ++num_inserted;
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
//// {
//// printf("c_i: %u "
//// "count: %u "
//// "loc_inserted: %u\n",
//// c_i,
//// num_inserted,
//// r);
//// }
// if (r < nk) {
// nk = r;
// }
// {
// ++range_count[r / loc_range];
// }
// }
// {
// if (0 == num_inserted) {
// ++zero_inserted_count;
// }
// locs_to_count[c_i] = num_inserted;
// count_inserted += num_inserted;
// }
//// {
//// printf("c_i: %u "
//// "num_inserted: %u\n",
//// c_i,
//// num_inserted);
//// }
// }
//// {
//// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) {
//// locs_to_count[c_i] = 0;
//// }
//// printf("iter:%u\n", tmp_count);
//// for (idi c_i = 0; c_i < M; ++c_i) {
//// printf("%u %u\n", c_i, locs_to_count[c_i]);
//// }
//// }
//// {//test
//// idi sum = 0;
//// for (const idi ct : range_count) sum += ct;
//// printf("tmp_count: %u "
//// "k: %u "
//// "actual_M: %u %.1f%% "
//// "zero_ins: %u %.1f%% "
//// "1/3: %u %.1f%% "
//// "2/3: %u %.1f%% "
//// "3/3: %u %.1f%%\n",
//// tmp_count,
//// k,
//// top_m_candidates_end, 100.0 * top_m_candidates_end / M,
//// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end,
//// range_count[0], 100.0 * range_count[0] / sum,
//// range_count[1], 100.0 * range_count[1] / sum,
//// range_count[2], 100.0 * range_count[2] / sum);
//// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {
// printf("query:%uiter: %u "
// "#neighbors: %lu "
// "#inserted: %lu "
// "ratio: %.2f%%\n",
// query_id, tmp_count,
// count_neighbors,
// count_inserted,
// 100.0 * count_inserted / count_neighbors);
// }
//// {// For histogram
////// const auto it_min = std::min_element(set_L.begin(), set_L.end());
////// const auto it_max = std::max_element(set_L.begin(), set_L.end());
////// const distf dist_min = it_min->distance_;
////// const distf dist_max = it_max->distance_;
////// const distf dist_min = it_min->distance_ - 1.0;
////// const distf dist_max = it_max->distance_ + 1.0;
//// const distf dist_range = dist_max_ - dist_min_;
////// const distf dist_range = dist_max - dist_min;
////// {
////// printf("it_min->distance_: %f dist_min: %f\n",
////// it_min->distance_, dist_min);
////// }
////// const distf dist_range = it_max->distance_ - it_min->distance_;
//// printf("iter:%u\n", tmp_count);
//// for (idi i_l = 0; i_l < L; ++i_l) {
////// printf("%f\n", set_L[i_l].distance_);
////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0);
//// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0);
//// }
//// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
// if (query_id == 3) {
// exit(1);
// }
//}
//
//// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
////void Searching::search_with_top_m(
//inline void Searching::search_with_top_m_profile_bit_CAS(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//// boost::dynamic_bitset<> is_visited(num_v_); // Bit array
// BitVector is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = true;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = true;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
///// Backup
//inline void Searching::search_with_top_m(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
////// DEPRECATED: the is_visited array cannot be shared among threads.
//inline void Searching::search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
// Batched top-M search: runs batch_size queries (queries_load_ rows
// batch_start .. batch_start + batch_size - 1) together. Each iteration,
// every unfinished query selects up to M unchecked candidates from its own
// queue; the selections of all queries are merged into one de-duplicated
// joint queue, so each distinct candidate's adjacency list is located once
// per iteration and its neighbors are pushed into the queues of exactly the
// queries that selected it (distances are still computed per query).
//
// Parameters:
//   M:           per-query expansion width per iteration.
//   batch_start: index of the first query in this batch.
//   batch_size:  number of queries processed together.
//   K:           number of result ids written per query (clamped to L below).
//   L:           search width of each per-query queue.
//   set_L_list:  one candidate queue per query in the batch.
//   init_ids:    L initial vertex ids shared by all queries in the batch.
//   set_K_list:  output; set_K_list[batch_start + q] receives query q's results.
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    // One visited-bitmap per query: visit state is per query, not shared.
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));

    // Prepare the init_ids
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }

    // Initialize set_L_list
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                // Vertex record layout: precomputed norm (dataf), then vector data.
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }

    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        boost::dynamic_bitset<> is_in_joint_queue(num_v_); // De-dup flag for the joint queue.
//        std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
//        std::vector<idi> cands_query_ids_ends(num_v_, 0);
        // Maps candidate id -> local ids of the queries that selected it this
        // iteration (bucket-count hint batch_size * M).
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        // Compacted list of queries whose k is still < L.
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;

        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue
            // Traverse every query's queue
            for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                // Select up to M unchecked candidates for this query.
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue
                    if (is_in_joint_queue[cand_id]) {
                        continue; // Already queued by another query this round.
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished

            // Traverse every shared candidate
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                // Adjacency list layout at offset data_bytes_: out-degree (idi)
                // followed by out_degree neighbor ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
                // Traverse cand_id's neighbors
//                idi &q_i_bound = cands_query_ids_ends[cand_id];

//                for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
//                    idi q_local_id = query_local_ids[q_i];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
//                    // Traverse cand_id's neighbors
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        if (dist > set_L[L-1].distance_) {
                            // Farther than this query's current worst: discard.
                            continue;
                        }
//                        if (dist >= set_L[L-1].distance_) {
//                            continue;
//                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                // Done with this candidate's selector list (erased after the
                // const-ref use above).
                cands_query_ids.erase(cand_id);
//                q_i_bound = 0; // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; //  Clear joint_queue
            // Advance each query's scan position and rebuild the
            // not-finished list for the next iteration.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true; // All queries exhausted their queues.
            }
        }
    }

    {// Export each query's K (at most L) best candidate ids.
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
////
//    {//test
//        for (idi q_i = 0; q_i < batch_size; ++q_i) {
//            printf("query: %u\n", q_i + batch_start);
//            for (idi c_i = 0; c_i < K; ++c_i) {
//                printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
//            }
//        }
//    }
}
//inline void Searching::para_search_with_top_m_critical_area(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_no_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_critical_area_yes_omp(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
//
//inline void Searching::para_search_with_top_m_visited_array(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// std::vector<uint8_t> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// uint64_t count_visited = 0;
//
//// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
////#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// ++count_visited;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
////#pragma omp parallel for
////#pragma omp parallel for reduction(min : nk)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//// ++count_visited;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++count_distance_computation_;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r;
////#pragma omp critical
// {
// r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
////#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
//// {
//// printf("query_id: %u "
//// "count_visited: %lu %f%%\n",
//// query_id,
//// count_visited,
//// 100.0 * count_visited / num_v_);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//{
//// {//test
//// printf("query_id: %u\n", query_id);
//// }
//// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// {// text
//// if (query_id == 4 &&
//// tmp_count == 5) {
//// // Print local queues
//// for (int t_i = 0; t_i < num_threads_; ++t_i) {
////// idi start_i = t_i * local_queue_length;
//// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//// printf("t[%u][%u]: "
//// "id: %u "
//// "dist: %f\n",
//// t_i, q_i,
//// local_queues_list[t_i][q_i].id_,
//// local_queues_list[t_i][q_i].distance_);
//// }
//// }
//// printf("----------\n");
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// printf("----------\n");
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_list(
// local_queues_list,
// local_queues_ends,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[0],
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// {//test
//// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("tmp_count: %u "
//// "set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// tmp_count,
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
//// }
////
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//// {
//// exit(1);
//// }
//// {//test
////
////// if (query_id == 4) {
//// for (idi i = 0; i < L; ++i) {
//// printf("set_L[%u]: "
//// "id: %u "
//// "dist: %f\n",
//// i,
//// set_L[i].id_,
//// set_L[i].distance_);
//// }
////// exit(1);
////// }
//// }
//}
//
////// Using local queue and then sequential merge.
//inline void Searching::para_search_with_top_m_queues_seq_merge(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
// const idi local_queue_length = L;
// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
// std::vector<idi> local_queues_ends(num_threads_, 0);
// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//// for (idi v_i = 0; v_i < L; ++v_i) {
//// idi v_id = init_ids[v_i];
//// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {
//// printf("tmp_count: %u "
//// "k: %u\n",
//// tmp_count,
//// k);
//// }
//
//// unsigned nk = L;
//// int nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
//// idi r;
////#pragma omp critical
//// {
//// r = insert_into_queue(set_L, L, cand);
//// if (r < nk) {
//// nk = r;
//// }
//// }
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
//inline void Searching::para_search_with_top_m_merge_queues_no_CAS(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length,
// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<idi> &local_queues_ends,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
//// const idi local_queue_length = L;
//// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
//// std::vector<idi> local_queues_ends(num_threads_, 0);
////// std::vector<uint8_t> is_visited(num_v_, 0);
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
//
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// // Merge. Merge all queues in parallel.
//// {
//// if (num_threads_ > 1) {
//// idi r = merge_all_queues_para(
//// local_queues_list,
//// local_queues_ends,
//// set_L,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// } else {
//// if (local_queues_ends[0]) {
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[0],
//// 0,
//// local_queues_ends[0]);
//// local_queues_ends[0] = 0;
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//// }
// // Merge
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
//inline void Searching::para_search_with_top_m_merge_queues_in_array(
//inline void Searching::para_search_with_top_m_merge_queues_new_threshold(
// const idi M,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
//// std::vector< std::vector<Candidate> > &local_queues_list,
// std::vector<Candidate> &local_queues_array,
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited)
//// std::vector<uint8_t> &is_visited)
//// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
//// is_visited[init_ids[c_i]] = 1;
// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// idi min_index = L - 1;
// distf min_1st = set_L[min_index].distance_;
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// const idi local_queue_start = tid * local_queue_length;
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
//// { // Sequential edition
//// if (is_visited[nb_id]) {
//// continue;
//// }
//// is_visited[nb_id] = 1;
//// }
//// { // __ATOMIC_SEQ_CST edition
//// if (!AtomicOps::CAS(is_visited.data() + nb_id,
//// static_cast<uint8_t>(0),
//// static_cast<uint8_t>(1))) {
//// continue;
//// }
//// }
//// {// Acquire and Release edition
//// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) {
//// continue;
//// }
//// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE);
//// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
//
// if (dist > min_1st) {
// continue;
// } else if (min_index > 0) {
// // Inserted, so min_1st needs update
// if (dist > set_L[min_index - 1].distance_) {
// min_1st = dist;
// if (min_index < L - 1) {
// ++min_index;
// }
// } else {
// min_1st = set_L[--min_index].distance_;
// }
//// min_1st = set_L[--min_index].distance_;
// }
//
//// if (dist > set_L[L-1].distance_) {
//// continue;
//// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// idi nk = L;
//// // Merge. Parallel merging in every two queues.
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_para(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
////// idi r = merge_two_queues_into_1st_queue_seq(
////// set_L,
////// 0,
////// L,
////// local_queues_list[tid],
////// 0,
////// local_queues_ends[tid]);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
//// local_queues_list,
// local_queues_array,
// local_queues_ends,
// local_queue_length,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[0],
// local_queues_array,
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
//// // Merge Sequentially
//// {
//// for (int tid = 0; tid < num_threads_; ++tid) {
//// if (0 == local_queues_ends[tid]) continue;
//// idi r = merge_two_queues_into_1st_queue_seq_fixed(
//// set_L,
//// 0,
//// L,
////// local_queues_list[tid],
////// 0,
//// local_queues_array,
//// tid * local_queue_length,
//// local_queues_ends[tid]);
////// L + 1);
//// local_queues_ends[tid] = 0; // Reset the local queue
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// }
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// is_visited.reset();
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//}
/*
 * 5/7/2020-15:14
 * Top-M best-first search for a single query with per-thread local queues.
 * Phase 1 runs single-threaded, doubling the selection width M each
 * iteration until M reaches value_M_middle; Phase 2 expands the top-M
 * candidates with an OpenMP parallel-for, where thread 0 inserts into the
 * "global" queue (set_L starting at offset base_set_L) and thread t > 0
 * inserts into its local queue at offset (t - 1) * local_queue_length.
 * After each parallel expansion all queues are merged back into the
 * global queue (merge_all_queues_para_array).
 *
 * Vertex layout visible from the pointer arithmetic below: each vertex in
 * opt_nsg_graph_ occupies vertex_bytes_ bytes, beginning with the
 * pre-computed norm (dataf) followed by the vector data; at offset
 * data_bytes_ sits the out-degree (idi) followed by the out-edge ids.
 *
 * Outputs: ids of the best K candidates are written to set_K; is_visited
 * and local_queues_ends are reset before returning. count_distance_computation_
 * and time_merge_ are accumulated as side effects.
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m(
        const idi value_M_middle,     // M value at which to switch to the parallel phase
        const idi value_M_max,        // upper bound on the selection width M
        const idi query_id,
        const idi K,                  // number of nearest-neighbor ids reported in set_K
        const idi L,                  // capacity (and initial fill) of the global queue
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,  // scratch buffer for the selected top-M ids
        boost::dynamic_bitset<> &is_visited)
{
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    {// Mark the initial candidates as visited.
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the initial candidates' vertex records.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // First dataf of the vertex record is its pre-computed norm.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the global-queue segment of set_L (ascending by distance).
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + L);
    // The last slot of local_queues_ends tracks the global queue's size.
    local_queues_ends[num_threads_ - 1] = L;
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;
    { // Single thread
        while (k < L && M < value_M_middle) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L; // Lowest insertion position this iteration (L = none).
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list lives at offset data_bytes_: degree first, then edge ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Skip neighbors worse than the current worst in the global queue.
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            // Resume scanning from the lowest insertion position if it is at or
            // before the last selected candidate; otherwise continue past it.
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
    { // Multiple Threads
        while (k < L) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L;
            // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): this check-then-set runs inside the
                        // parallel region with no synchronization; two threads
                        // may both pass the check and recompute the distance,
                        // and boost::dynamic_bitset bit writes are not atomic.
                        // Presumably an accepted benign race — confirm.
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
//            // Merge. Merge all queues in parallel.
            {
                time_merge_ -= WallTimer::get_time_mark();
                if (num_threads_ > 1) {
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
                time_merge_ += WallTimer::get_time_mark();
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
    // Report the ids of the best K candidates from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}
/*
 * Variant of para_search_with_top_m_merge_queues_middle_m that
 * (a) seeds the global queue with only init_size candidates instead of L,
 * (b) tracks the global queue's current size through the reference alias
 *     global_queue_size, and
 * (c) stops both phases early once the per-query distance-computation count
 *     exceeds computation_threshold.
 * NOTE(review): despite the "_no_merge" suffix, the parallel phase still
 * merges the local queues via merge_all_queues_para_array — confirm the name.
 * The count_init/seq/par counters are only consumed by the commented-out
 * diagnostic printf at the bottom.
 */
inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge(
        const uint64_t computation_threshold, // budget of distance computations for this query
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        const idi init_size,                  // number of initial candidates actually seeded
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
    // Per-phase computation counters (diagnostic only; see commented printf at the end).
    uint64_t count_single_query_computation = 0;
    uint64_t count_init_computation = 0;
    uint64_t count_seq_computation = 0;
    uint64_t count_par_computation = 0;
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    {// Mark the initial candidates as visited.
#pragma omp parallel for
        for (idi c_i = 0; c_i < init_size; ++c_i) {
//        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the initial candidates' vertex records.
#pragma omp parallel for
    for (idi v_i = 0; v_i < init_size; ++v_i) {
//    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < init_size; i++) {
//    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // First dataf of the vertex record is its pre-computed norm.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation_;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    count_init_computation += tmp_count_computation;
    count_single_query_computation += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the seeded portion of the global-queue segment.
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + init_size);
//            set_L.begin() + base_set_L + L);
    local_queues_ends[num_threads_ - 1] = init_size;
//    local_queues_ends[num_threads_ - 1] = L;
//    time_initialization_ += WallTimer::get_time_mark();
//    time_sequential_phase_ -= WallTimer::get_time_mark();
//    std::vector<idi> top_m_candidates(M);
    // Alias: the last slot of local_queues_ends is the global queue's size.
    idi &global_queue_size = local_queues_ends[num_threads_ - 1];
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;
    { // Single thread
        while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L; // Lowest insertion position this iteration (L = none).
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list lives at offset data_bytes_: degree first, then edge ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Compare against the current worst of the (possibly partial) queue.
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            global_queue_size,
//                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_seq_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
//    time_sequential_phase_ += WallTimer::get_time_mark();
//    time_parallel_phase_ -= WallTimer::get_time_mark();
    { // Multiple Threads
        while (k < L and count_single_query_computation <= computation_threshold) {
//        while (k < L) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d "
//                       "k: %u "
//                       "global_queue_size: %u\n",
//                       tmp_count,
//                       k,
//                       global_queue_size);
//            }
//            int real_threads = std::min(static_cast<int>(M), num_threads_);
//            idi queue_base = num_threads_ - real_threads;
            // Select M candidates
            idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) {
//            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L;
            // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized check-then-set inside a
                        // parallel region; concurrent threads may duplicate work
                        // and bitset writes race. Presumably accepted — confirm.
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) {
//                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                global_queue_size,
//                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            count_par_computation += tmp_count_computation;
            count_single_query_computation += tmp_count_computation;
            tmp_count_computation = 0;
//            {// Local queues' ends
//                printf("query%u:iter: %u", query_id, tmp_count);
//                for (int i_t = 0; i_t < num_threads_; ++i_t) {
//                    printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//                }
//                printf("\n");
//            }
            // Merge. Merge all queues in parallel.
            {
                if (num_threads_ > 1) {
//                    idi r = merge_all_queues_queue_base(
//                            set_L,
//                            local_queues_ends,
//                            queue_base,
//                            real_threads,
//                            local_queue_length,
//                            L);
                    idi r = merge_all_queues_para_array(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
//            {// Print relative distance
////                distf top_dist = set_L[base_set_L].distance_;
//                for (idi i_l = 0; i_l < L; ++i_l) {
//                    printf("%u %f\n",
//                           tmp_count, set_L[i_l + base_set_L].distance_);
////                    tmp_count, set_L[i_l + base_set_L].distance_ - top_dist);
//                }
//            }
        }
    }
//    time_parallel_phase_ += WallTimer::get_time_mark();
    // Report the ids of the best K candidates from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
//    {//test
//        printf("count_single: %lu "
//               "ct_init: %lu "
//               "ct_seq: %lu "
//               "ct_par: %lu\n",
//               count_single_query_computation,
//               count_init_computation,
//               count_seq_computation,
//               count_par_computation);
//    }
}
/*
 * 6/15/2020-14:40
 * Queues merging together to the global queue.
 * Variant of the middle-M top-M search whose parallel phase merges the
 * per-thread local queues into the global queue sequentially
 * (merge_all_queues_all_together_in_sequential) instead of in parallel.
 * All other structure matches para_search_with_top_m_merge_queues_middle_m:
 * single-threaded until M reaches value_M_middle, then OpenMP expansion
 * with thread 0 writing the global queue at base_set_L and thread t > 0
 * writing its local queue at (t - 1) * local_queue_length.
 */
inline void Searching::para_search_with_top_m_merge_queues_sequential_merge(
        const idi value_M_middle,     // M value at which to switch to the parallel phase
        const idi value_M_max,        // upper bound on the selection width M
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length, // Maximum size of local queue
        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
        std::vector<idi> &local_queues_ends, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
    {// Mark the initial candidates as visited.
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the initial candidates' vertex records.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    uint64_t tmp_count_computation = 0;
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation)
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // First dataf of the vertex record is its pre-computed norm.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation_;
        ++tmp_count_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;
//    std::sort(set_L.begin(), set_L.begin() + L);
    // Sort only the global-queue segment of set_L (ascending by distance).
    std::sort(
            set_L.begin() + base_set_L,
            set_L.begin() + base_set_L + L);
    // The last slot of local_queues_ends tracks the global queue's size.
    local_queues_ends[num_threads_ - 1] = L;
//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;
    { // Single thread
        while (k < L && M < value_M_middle) {
            ++tmp_count;
//            {//test
//                printf("tmp_count: %d\n", tmp_count);
//            }
            // Select M candidates
            idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L; // Lowest insertion position this iteration (L = none).
            // Push M candidates' neighbors into the queue.
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                // Adjacency list lives at offset data_bytes_: degree first, then edge ids.
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    // Skip neighbors worse than the current worst in the global queue.
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Thread 0 maintains the "global" queue
                    idi r = add_into_queue(
                            set_L,
                            base_set_L,
                            local_queues_ends[num_threads_ - 1],
                            L,
                            cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
    { // Multiple Threads
        while (k < L) {
            ++tmp_count;
//            {//test
//                if (num_threads_ == 2) {
//                    printf("tmp_count: %d "
//                           "k: %u\n",
//                           tmp_count,
//                           k);
//                }
//            }
            // Select M candidates
            idi last_k = L;
            // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
            for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
                idi index_set_L = c_i + base_set_L;
                if (set_L[index_set_L].is_checked_) {
                    continue;
                }
                last_k = c_i; // Record the location of the last candidate selected.
                set_L[index_set_L].is_checked_ = true;
                top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
            }
            idi nk = L;
            // Push M candidates' neighbors into the queue.
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
#pragma omp parallel for reduction(+ : tmp_count_computation)
            for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
                int tid = omp_get_thread_num();
                idi cand_id = top_m_candidates[c_i];
                _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                for (idi n_i = 0; n_i < out_degree; ++n_i) {
                    _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
                }
                for (idi e_i = 0; e_i < out_degree; ++e_i) {
                    idi nb_id = out_edges[e_i];
                    { // Sequential edition
                        // NOTE(review): unsynchronized check-then-set inside a
                        // parallel region; concurrent threads may duplicate work
                        // and bitset writes race. Presumably accepted — confirm.
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = 1;
                    }
                    auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                    dataf norm = *nb_data++;
//                    ++count_distance_computation_;
                    ++tmp_count_computation;
                    distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                    if (dist > set_L[L - 1 + base_set_L].distance_) {
                        continue;
                    }
                    Candidate cand(nb_id, dist, false);
                    // Add to the local queue.
                    if (0 != tid) {
                        // Non-Master threads using local queues
                        add_into_queue(
                                set_L,
                                (tid - 1) * local_queue_length,
                                local_queues_ends[tid - 1],
                                local_queue_length,
                                cand);
                    } else {
                        // Thread 0 maintains the "global" queue
                        idi r = add_into_queue(
                                set_L,
                                base_set_L,
                                local_queues_ends[num_threads_ - 1],
                                L,
                                cand);
                        if (r < nk) {
                            nk = r;
                        }
                    }
                }
            }
            top_m_candidates_end = 0; // Clear top_m_candidates
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
//            // Merge. Merge all queues in parallel.
            {
//                {//test
//                    for (idi q_i = 0; q_i < num_threads_; ++q_i) {
//                        if (0 == local_queues_ends[q_i]) {
//                            continue;
//                        }
//                        for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) {
//                            printf("tmp_count: %u "
//                                   "q_i: %u "
//                                   "[%u]: (%u, %f)\n",
//                                   tmp_count,
//                                   q_i,
//                                   e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_);
//                        }
//                    }
//                }
//                time_merge_ -= WallTimer::get_time_mark();
                // Sequential all-together merge (this variant's distinguishing step).
                if (num_threads_ > 1) {
                    idi r = merge_all_queues_all_together_in_sequential(
                            set_L,
                            local_queues_ends,
                            local_queue_length,
                            L);
//                    idi r = merge_all_queues_para_array(
//                            set_L,
//                            local_queues_ends,
//                            local_queue_length,
//                            L);
                    if (r < nk) {
                        nk = r;
                    }
//                    {//test
//                        printf("tmp_count: %u "
//                               "r: %u "
//                               "last_k: %u\n",
//                               tmp_count,
//                               r,
//                               last_k);
//                        for (idi l_i = 0; l_i < L; ++l_i) {
//                            printf("tmp_count: %u "
//                                   "[%u]: (%u, %f)\n",
//                                   tmp_count,
//                                   l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_);
//                        }
//                    }
                }
//                time_merge_ += WallTimer::get_time_mark();
            }
            if (nk <= last_k) {
                k = nk;
            } else {
                k = last_k + 1;
            }
            {// Scale M
                if (M < value_M_max) {
                    M <<= 1;
                } else {
                    M = value_M_max;
                }
            }
        }
    }
    // Report the ids of the best K candidates from the global queue.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + base_set_L].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }
    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
//    {//test
//        if (0 == query_id) {
//            exit(1);
//        }
//    }
}
/*
* 6/19/2020:
* Intra-query + Inter-query
*/
inline void Searching::para_search_with_top_m_nested_para(
const idi batch_start,
const idi batch_size,
const idi value_M_middle,
const idi value_M_max,
const idi K,
const idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
const idi local_queue_length, // Maximum size of local queue
const idi base_set_L, // base_set_L = (num_threads_intra_ - 1) * local_queue_length;
std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue
std::vector< std::vector<idi> > &top_m_candidates_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list)
{
{// Initialize is_visited flag array
#pragma omp parallel for num_threads(num_threads_inter_)
for (idi q_i = 0; q_i < batch_size; ++q_i) {
auto &is_visited = is_visited_list[q_i];
#pragma omp parallel for num_threads(num_threads_intra_)
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
}
}
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
uint64_t tmp_count_total_computation = 0;
#pragma omp parallel for num_threads(num_threads_inter_) reduction(+ : tmp_count_total_computation)
for (idi q_i = 0; q_i < batch_size; ++q_i) {
idi query_id = batch_start + q_i;
auto &set_L = set_L_list[q_i];
auto &local_queues_ends = local_queues_ends_list[q_i];
auto &is_visited = is_visited_list[q_i];
const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
uint64_t tmp_count_computation = 0;
// Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
// ++count_distance_computation_;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
}
// count_distance_computation_ += tmp_count_computation;
tmp_count_total_computation += tmp_count_computation;
tmp_count_computation = 0;
// std::sort(set_L.begin(), set_L.begin() + L);
std::sort(
set_L.begin() + base_set_L,
set_L.begin() + base_set_L + L);
local_queues_ends[num_threads_intra_ - 1] = L;
// std::vector<idi> top_m_candidates(M);
idi top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi tmp_count = 0; // for debug
idi M = 1;
auto &top_m_candidates = top_m_candidates_list[q_i];
{ // Single thread
while (k < L && M < value_M_middle) {
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
idi index_set_L = c_i + base_set_L;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
idi nk = L;
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation_;
++tmp_count_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// {//test
// if (391655 == nb_id) {
// printf("tmp_count: %u "
// "nb_id: %u "
// "distf: %f\n",
// tmp_count,
// nb_id,
// dist);
// }
// }
if (dist > set_L[L - 1 + base_set_L].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Thread 0 maintains the "global" queue
idi r = add_into_queue(
set_L,
base_set_L,
local_queues_ends[num_threads_intra_ - 1],
L,
cand);
if (r < nk) {
nk = r;
}
}
}
top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
tmp_count_total_computation += tmp_count_computation;
tmp_count_computation = 0;
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
}
{ // Multiple Threads
while (k < L) {
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
idi index_set_L = c_i + base_set_L;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
idi nk = L;
// Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_)
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
int tid = omp_get_thread_num();
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation_;
++tmp_count_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// {//test
// if (391655 == nb_id) {
// printf("tmp_count: %u "
// "nb_id: %u "
// "distf: %f\n",
// tmp_count,
// nb_id,
// dist);
// }
// }
if (dist > set_L[L - 1 + base_set_L].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Add to the local queue.
if (0 != tid) {
// Non-Master threads using local queues
add_into_queue(
set_L,
(tid - 1) * local_queue_length,
local_queues_ends[tid - 1],
local_queue_length,
cand);
} else {
// Thread 0 maintains the "global" queue
idi r = add_into_queue(
set_L,
base_set_L,
local_queues_ends[num_threads_intra_ - 1],
L,
cand);
if (r < nk) {
nk = r;
}
}
}
}
top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
tmp_count_total_computation += tmp_count_computation;
tmp_count_computation = 0;
// // Merge. Merge all queues in parallel.
{
// time_merge_ -= WallTimer::get_time_mark();
if (num_threads_intra_ > 1) {
idi r = merge_all_queues_para_array(
set_L,
local_queues_ends,
local_queue_length,
L);
if (r < nk) {
nk = r;
}
}
// time_merge_ += WallTimer::get_time_mark();
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1;
} else {
M = value_M_max;
}
}
}
}
count_distance_computation_ += tmp_count_total_computation;
tmp_count_total_computation = 0;
auto &set_K = set_K_list[query_id];
#pragma omp parallel for num_threads(num_threads_intra_)
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + base_set_L].id_;
// set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
}
}
// {//test
// if (3 == query_id) {
// exit(1);
// }
// }
// {
// for (idi k_i = 0; k_i < K; ++k_i) {
// printf("%u: (%u %f)\n",
// k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_);
// }
// if (0 == batch_start) {
// exit(1);
// }
// }
}
///*
// * 6/22/2020-09:38
// * A synchronized last element as the sentinel
// */
//inline void Searching::para_search_with_top_m_merge_queues_global_threshold(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
// std::vector<idi> &top_m_candidates,
// boost::dynamic_bitset<> &is_visited)
//{
//// const idi base_set_L = (num_threads_ - 1) * local_queue_length;
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
// local_queues_ends[num_threads_ - 1] = L;
//
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// { // Single thread
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
// { // Multiple Threads
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// {// Local queues' ends
//// printf("query%u:iter: %u", query_id, tmp_count);
// idi total_elements = 0;
// for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) {
// total_elements += local_queues_ends[i_t];
// }
// number_local_elements_ += total_elements;
//// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]);
//// for (int i_t = 0; i_t < num_threads_; ++i_t) {
//// printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
//// }
//// printf("\n");
// }
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
// if (r < nk) {
// nk = r;
// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
//
// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// if (0 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/7/2020-16:55
// * Use 1 threads to scale M until the value_M_middle.
// * Then use multiple threads.
// * Except for Thread 0, other threads are collectors. They collect, but do not merge.
// * Only merge once after Thread 0 stops.
// */
//inline void Searching::para_search_with_top_m_merge_queues_collectors(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//// std::vector<distf> &local_thresholds)
//// BitVector &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
// while (k < L) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi chunk_size;
// if (num_threads_ <= top_m_candidates_end) {
// chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1;
// } else {
// chunk_size = 1;
// }
// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
////#pragma omp parallel for reduction(+ : tmp_count_computation)
//#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
//// {
//// if (c_i < chunk_size && tid != 0) {
//// printf("query_id: %u "
//// "tmp_count: %u "
//// "chunk_size: %u "
//// "c_i: %u "
//// "tid: %u\n",
//// query_id,
//// tmp_count,
//// chunk_size,
//// c_i,
//// tid);
//// }
//// }
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
////// // Merge. Merge all queues in parallel.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// idi r = merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// if (r < nk) {
//// nk = r;
//// }
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
//
//// // Merge only once after Master Thread stops.
//// {
//// time_merge_ -= WallTimer::get_time_mark();
//// if (num_threads_ > 1) {
////// idi r = merge_all_queues_queue_base(
////// set_L,
////// local_queues_ends,
////// queue_base,
////// real_threads,
////// local_queue_length,
////// L);
//// merge_all_queues_para_array(
//// set_L,
//// local_queues_ends,
//// local_queue_length,
//// L);
//// }
//// time_merge_ += WallTimer::get_time_mark();
//// }
// }
//
//
//#pragma omp parallel for
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i + base_set_L].id_;
//// set_K[k_i] = set_L[k_i].id_;
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
///*
// * 6/8/2020-16:39
// * Selecting rather than merging
// */
//inline void Searching::para_search_with_top_m_merge_queues_selecting(
// const idi value_M_middle,
// const idi value_M_max,
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// const idi local_queue_length, // Maximum size of local queue
// const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
// std::vector<idi> &local_queues_ends, // Sizes of local queue
//// std::vector<Candidate> &top_m_candidates,
// std::vector<idi> &top_m_candidates,
//// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
//{
// {
//#pragma omp parallel for
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
//// is_visited.atomic_set_bit(init_ids[c_i]);
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// uint64_t tmp_count_computation = 0;
// // Get the distances of all candidates, store in the set set_L.
////#pragma omp parallel for
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked.
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//// std::sort(set_L.begin(), set_L.begin() + L);
// std::sort(
// set_L.begin() + base_set_L,
// set_L.begin() + base_set_L + L);
//// boost::sort::block_indirect_sort(
//// set_L.begin() + base_set_L,
//// set_L.begin() + base_set_L + L,
//// num_threads_);
// local_queues_ends[num_threads_ - 1] = L;
//
//// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// idi M = 1;
//
// // Single thread
// {
// while (k < L && M < value_M_middle) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//
//// int real_threads = std::min(static_cast<int>(M), num_threads_);
//// idi queue_base = num_threads_ - real_threads;
// // Select M candidates
// idi last_k = L;
//// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// idi index_set_L = c_i + base_set_L;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
//
// idi nk = L;
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Thread 0 maintains the "global" queue
// idi r = add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
//
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
// // Multiple Threads
// {
//// while (k < L/num_threads_/2) {
//// while (k < L) {
// while (true) {
// ++tmp_count;
//// {//test
//// printf("tmp_count: %d\n", tmp_count);
//// }
//// // Select M candidates
//// idi last_k = L;
////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
//// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
//// idi index_set_L = c_i + base_set_L;
//// if (set_L[index_set_L].is_checked_) {
//// continue;
//// }
//// last_k = c_i; // Record the location of the last candidate selected.
//// set_L[index_set_L].is_checked_ = true;
//// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
//// }
//
// // Select M candidates
// {
// idi traverse_count = 0;
// idi bound_sub = L; // This is not always true!
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) {
// for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) {
// if (sub >= local_queues_ends[tid]) {
// continue;
// }
// idi index_set_L = tid * local_queue_length + sub;
// if (set_L[index_set_L].is_checked_) {
// continue;
// }
// set_L[index_set_L].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
// }
// }
//
// if (0 == top_m_candidates_end) {
// break;
// }
// }
//
//// idi nk = L;
// // Push M candidates' neighbors into the queue.
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads)
//#pragma omp parallel for reduction(+ : tmp_count_computation)
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// int tid = omp_get_thread_num();
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
//
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation_;
// ++tmp_count_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L - 1 + base_set_L].distance_) {
// continue;
// }
//
// Candidate cand(nb_id, dist, false);
// // Add to the local queue.
// if (0 != tid) {
// // Non-Master threads using local queues
// add_into_queue(
// set_L,
// (tid - 1) * local_queue_length,
// local_queues_ends[tid - 1],
// local_queue_length,
// cand);
// } else {
// // Thread 0 maintains the "global" queue
//// idi r =
// add_into_queue(
// set_L,
// base_set_L,
// local_queues_ends[num_threads_ - 1],
// L,
// cand);
//// if (r < nk) {
//// nk = r;
//// }
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
// count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
//
//// // Merge. Merge all queues in parallel.
// {
// time_merge_ -= WallTimer::get_time_mark();
// if (num_threads_ > 1) {
//// idi r = merge_all_queues_queue_base(
//// set_L,
//// local_queues_ends,
//// queue_base,
//// real_threads,
//// local_queue_length,
//// L);
//// idi r =
// merge_all_queues_para_array(
// set_L,
// local_queues_ends,
// local_queue_length,
// L);
//// if (r < nk) {
//// nk = r;
//// }
// }
// time_merge_ += WallTimer::get_time_mark();
// }
//// if (nk <= last_k) {
//// k = nk;
//// } else {
//// k = last_k + 1;
//// }
// {// Scale M
// if (M < value_M_max) {
// M <<= 1;
// } else {
// M = value_M_max;
// }
// }
// }
// }
//
//
////#pragma omp parallel for
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// set_K[k_i] = set_L[k_i + base_set_L].id_;
////// set_K[k_i] = set_L[k_i].id_;
//// }
//
// {
// idi k_i = 0;
// idi bound_sub = K / num_threads_;
// for (idi sub = 0; sub < bound_sub; ++sub) {
// for (int tid = 0; tid < num_threads_; ++tid) {
// idi index_set_L = tid * local_queue_length + sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// idi remain = K - k_i;
// if (remain) {
// for (int tid = 0; tid < remain; ++tid) {
// idi index_set_L = tid * local_queue_length + bound_sub;
// set_K[k_i++] = set_L[index_set_L].id_;
// }
// }
// }
//
// {// Reset
//// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.reset();
//// is_visited.clear_all();
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
// }
//
//// {//test
//// printf("tmp_count: %u\n", tmp_count);
//// if (3 == query_id) {
//// exit(1);
//// }
//// }
//}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
#define ThrowWandException(severity,tag,context) \
{ \
(void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \
tag,"`%s'",context); \
return(MagickFalse); \
}
/*
Typedef declarations.
*/
struct _PixelView
{
  size_t
    id;                  /* unique wand id obtained from AcquireWandId() */

  char
    name[MaxTextExtent]; /* "PixelView-<id>" identifier string */

  ExceptionInfo
    *exception;          /* per-view error state */

  MagickWand
    *wand;               /* the magick wand this view operates on */

  CacheView
    *view;               /* pixel cache view over the wand's image */

  RectangleInfo
    region;              /* image region covered by this view */

  size_t
    number_threads;      /* number of rows in pixel_wands (one per thread) */

  PixelWand
    ***pixel_wands;      /* per-thread arrays of per-column pixel wands */

  MagickBooleanType
    debug;               /* when not MagickFalse, log wand events */

  size_t
    signature;           /* WandSignature while the structure is valid */
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() average a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/*
  CloneMagickWandFromImages() builds a new wand around the given image list.
  The clone copies the source wand's image info, quantize info, and exception
  state; ownership of `images' transfers to the returned wand (it is adopted,
  not cloned).  On allocation failure a fatal wand exception is thrown.
*/
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
  Image *images)
{
  MagickWand
    *clone_wand;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
  if (clone_wand == (MagickWand *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      images->filename);
  /* start from a zeroed wand, then populate each field explicitly */
  (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand));
  clone_wand->id=AcquireWandId();
  (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
    MagickWandId,(double) clone_wand->id);
  clone_wand->exception=AcquireExceptionInfo();
  InheritException(clone_wand->exception,wand->exception);
  clone_wand->image_info=CloneImageInfo(wand->image_info);
  clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
  clone_wand->images=images;  /* adopted: the clone now owns this list */
  clone_wand->debug=IsEventLogging();
  if (clone_wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
  clone_wand->signature=WandSignature;
  return(clone_wand);
}
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
  /*
    Compute the per-pixel mean of every image in the wand and hand the
    result back wrapped in a freshly cloned wand.  Returns NULL when the
    wand holds no images or the evaluation fails.
  */
  Image
    *mean_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  mean_image=EvaluateImages(wand->images,MeanEvaluateOperator,
    wand->exception);
  if (mean_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,mean_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/*
  ClonePixelView() makes a deep copy of the specified pixel view: a fresh id
  and name, cloned exception state, a cloned cache view, and cloned per-thread
  pixel-wand rows.  The caller owns the result and releases it with
  DestroyPixelView().
*/
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /*
    Bug fix: ResetMagickMemory() left pixel_wands NULL, yet the loop below
    stored into clone_view->pixel_wands[i] -- a NULL-pointer dereference for
    any number_threads > 0.  Allocate the per-thread pointer array first; it
    is released later by DestroyPixelsThreadSet() via DestroyPixelView().
  */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  /*
    NOTE(review): clone_view->wand is not copied from pixel_view->wand here;
    confirm whether the clone is expected to reference the source's wand.
  */
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view,
% const size_t number_wands,const size_t number_threads)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o number_wand: the number of pixel wands.
%
% o number_threads: number of threads.
%
*/
/*
  DestroyPixelsThreadSet() releases every per-thread row of pixel wands and
  then the row table itself.  Always returns NULL so callers can overwrite
  their pointer in a single assignment.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  ssize_t
    thread;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    if (pixel_wands[thread] == (PixelWand **) NULL)
      continue;
    pixel_wands[thread]=DestroyPixelWands(pixel_wands[thread],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  /*
    Release every resource held by the view -- per-thread pixel wands, the
    cache view, the exception, the wand id -- then the structure itself.
    Always returns NULL for convenient pointer reassignment.
  */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  pixel_view->signature=(~WandSignature);  /* invalidate before freeing */
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel region is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination pixel view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source, duplex, and destination views in parallel, one scanline
  at a time: load each view's row into its per-thread pixel wands, invoke the
  user transfer callback, then write the (possibly modified) destination wands
  back to the destination image.  Returns MagickFalse if any scanline fails.
*/
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /* NOTE(review): only `source' is asserted; duplex and destination are
     dereferenced below without validation -- confirm callers guarantee
     non-NULL views. */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /* NOTE(review): the bound compares against region.height alone, not
     region.y+region.height -- confirm intended behavior when region.y != 0. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;  /* another scanline already failed; skip remaining work */
    /* load the source scanline into this thread's pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* load the duplex scanline into this thread's pixel wands */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /* get a writable destination scanline and mirror it into wands */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* user callback transforms the destination wands in place */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* copy (possibly modified) destination wands back to the pixels */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): on sync failure the exception is read from
           source->view rather than destination->view -- confirm this is
           intentional and not a copy/paste slip. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
% char *GetPixelViewException(const PixelWand *pixel_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel pixel_view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  /*
    Format any error recorded on the view into a freshly allocated string
    (reason, plus " (description)" when present).  The severity is returned
    through *severity.  The caller owns the returned buffer and must free it
    (e.g. with RelinquishMagickMemory()).
  */
  char
    *description;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';  /* empty string when no reason is recorded */
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  /* Accessor: the height of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/*
  Iterate the source view scanline-by-scanline (parallelized when OpenMP is
  available): each row is loaded into the calling thread's pixel wands and
  the user `get' callback is invoked; callback modifications are discarded.
  Returns MagickFalse if any scanline fails or the callback returns failure.
*/
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  /* NOTE(review): the bound compares against region.height alone, not
     region.y+region.height -- confirm intended behavior when region.y != 0. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* an earlier scanline failed; skip remaining work */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* mirror the scanline into this thread's pixel wands */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel view pixel_wands.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  const int
    id = GetOpenMPThreadId();

  /* Returns the calling thread's row of pixel wands; presumably intended
     for use inside a view iterator callback where that row holds the
     current scanline -- confirm with callers. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  /* Accessor: the magick wand associated with this view. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  /* Accessor: the width of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  /* Accessor: the x offset of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  /* Accessor: the y offset of the view's region. */
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPixelView() returns MagickTrue if the the parameter is verified as a pixel
% view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  /*
    A pixel view is considered valid when it is non-NULL, carries the wand
    signature, and its name begins with the "PixelView" identifier prefix.
  */
  if ((pixel_view == (const PixelView *) NULL) ||
      (pixel_view->signature != WandSignature))
    return(MagickFalse);
  if (LocaleNCompare(pixel_view->name,PixelViewId,strlen(PixelViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. Id may be a number
% if preceded with #, to work on a numbered path, e.g., "#1" to use the first
% path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  /* Deprecated: forwards to MagickClipImagePath(). */
  return(MagickClipImagePath(wand,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated: forwards to DrawGetFillOpacity(). */
  return(DrawGetFillOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated: forwards to DrawGetStrokeOpacity(). */
  return(DrawGetStrokeOpacity(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
% The format of the PeekDrawingWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated: forwards to PeekDrawingWand(). */
  return(PeekDrawingWand(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
% MagickBooleanType DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated: forwards to PopDrawingWand(), discarding its status. */
  (void) PopDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
% MagickBooleanType DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated: forwards to PushDrawingWand(), discarding its status. */
  (void) PushDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated: forwards to DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated: forwards to DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  /*
    Deprecated flood fill: fills with `fill' starting at (x,y).  When
    `bordercolor' is supplied, fills up to (but not matching) that color
    (FillToBorderMethod); otherwise fills pixels matching the seed color
    within `fuzz' tolerance (FloodfillMethod).
  */
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /* seed color comes from the pixel at (x,y), wrapped onto the canvas.
     NOTE(review): x and y are signed but columns/rows are unsigned, so a
     negative coordinate wraps to a huge offset under the modulo -- confirm
     callers never pass negative coordinates. */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);  /* border-bounded fill */
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by printing its attributes to the
% file. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
% const char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated: forwards to MagickIdentifyImage(); caller frees result. */
  return(MagickIdentifyImage(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickFlattenImages() merges a sequence of images. This useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  /*
    Merge the wand's image sequence into a single flattened image and hand
    it back wrapped in a freshly cloned wand.  Returns NULL when the wand
    holds no images or the flatten operation fails.
  */
  Image
    *merged;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  merged=FlattenImages(wand->images,wand->exception);
  if (merged == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,merged));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated: forwards to MagickGetImageProperty(); caller frees result. */
  return(MagickGetImageProperty(wand,property));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated: forwards to MagickGetIteratorIndex(). */
  return(MagickGetIteratorIndex(wand));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  /*
    Deprecated: report the minimum/maximum pixel values of the selected
    channel(s) via *minima/*maxima.  Throws a wand exception when the wand
    holds no images.
  */
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception);
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  /*
    Deprecated.  Delegate to the core GetImageExtrema() and fold its
    status down to 0/1 as the original wrapper did.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  if (GetImageExtrema(wand->images,minima,maxima,wand->exception) ==
      MagickFalse)
    return(0);
  return(1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  /* Deprecated.  Report the matte flag of the current image. */
  MagickBooleanType
    matte;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  matte=wand->images->matte;
  return(matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated alias: forward to MagickExportImagePixels(). */
  MagickBooleanType
    status;

  status=MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  /* Deprecated.  Report the blob length, in bytes, of the current image. */
  MagickSizeType
    length;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  length=GetBlobSize(wand->images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  /*
    Deprecated.  Remap the wand's colors to the closest colors of the map
    wand via the core MapImage(); propagate any failure into the wand's
    exception and fold the status down to 0/1 as the original did.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  if (MapImage(wand->images,map_wand->images,dither) == MagickFalse)
    {
      InheritException(wand->exception,&wand->images->exception);
      return(0);
    }
  return(1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  /*
    Deprecated.  Flood-fill the opacity channel starting at (x,y): the
    target color is sampled at the (wrapped) starting location, or taken
    from bordercolor when one is supplied, in which case the fill stops
    at the border color instead of at non-matching pixels.

    Fix: the original cloned a DrawInfo from wand->image_info and then
    destroyed it without ever using it — a pointless allocate/free pair
    that has been removed.
  */
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  /* alpha is opacity-inverted: 1.0 opaque -> 0 opacity, 0.0 -> QuantumRange */
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated.  Replace each pixel with the median of its neighborhood
    (radius pixels) and swap the filtered image into the wand's list.
  */
  Image
    *filtered;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered=MedianFilterImage(wand->images,radius,wand->exception);
  if (filtered == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,filtered);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  /*
    Deprecated.  Evaluate the minimum intensity across the image sequence
    and return the result wrapped in a fresh wand, or NULL on failure.
  */
  Image
    *result;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  result=EvaluateImages(wand->images,MinEvaluateOperator,wand->exception);
  if (result == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,result));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominant color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated.  Set each pixel to the predominant color of its
    neighborhood (radius pixels) and swap the result into the wand.
  */
  Image
    *filtered;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered=ModeImage(wand->images,radius,wand->exception);
  if (filtered == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,filtered);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  /*
    Deprecated.  Inlay the image sequence into a single picture using
    each image's page offset; return a new wand, or NULL on failure.
  */
  Image
    *result;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  result=MosaicImages(wand->images,wand->exception);
  if (result == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,result));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Deprecated alias: forward to MagickPaintOpaqueImage(). */
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated.  Delegate to MagickFloodfillPaintImage() with invert set
    to MagickFalse; fold the status down to 0/1 as the original did.
  */
  if (MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
      MagickFalse) == MagickFalse)
    return(0);
  return(1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Deprecated: the all-channels form of MagickPaintOpaqueImageChannel(). */
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated.  Delegate to MagickOpaquePaintImageChannel() with invert
    set to MagickFalse; fold the status down to 0/1 as the original did.
  */
  if (MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
      MagickFalse) == MagickFalse)
    return(0);
  return(1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* Deprecated alias: MagickTransparentPaintImage() without inversion. */
  MagickBooleanType
    status;

  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() apply color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
*/
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  /*
    Deprecated.  Apply an order x order color matrix transform via the
    core RecolorImage() and swap the transformed image into the wand.
  */
  Image
    *recolored;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  recolored=RecolorImage(wand->images,order,color_matrix,wand->exception);
  if (recolored == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,recolored);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated.  Smooth contours while preserving edges by replacing each
    pixel with its closest-valued neighbor; swap the result into the wand.
  */
  Image
    *smoothed;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  smoothed=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (smoothed == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,smoothed);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  /*
    Deprecated.  Evaluate the maximum intensity across the image sequence
    and return the result wrapped in a fresh wand, or NULL on failure.
  */
  Image
    *result;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  result=EvaluateImages(wand->images,MaxEvaluateOperator,wand->exception);
  if (result == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,result));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
  const char *property,const char *value)
{
  /*
    Deprecated.  Associate a property with the current image via
    SetImageProperty().

    Fix: the original dereferenced wand->images unconditionally; a wand
    with no images would hand a NULL Image* to SetImageProperty().  Add
    the same wand asserts and ContainsNoImages guard every sibling
    wrapper in this file uses.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() set the current image to the position of the list
% specified with the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
  const ssize_t index)
{
  /* Deprecated alias: forward to MagickSetIteratorIndex(). */
  MagickBooleanType
    status;

  status=MagickSetIteratorIndex(wand,index);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a g i c k S e t I m a g e O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageOption() associates one or more options with a particular
%  image format (e.g. MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  /*
    Deprecated.  Compose a "format:key=value" option string and register
    it on the wand's image info via DefineImageOption().
  */
  char
    option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  (void) FormatLocaleString(option,MaxTextExtent,"%s:%s=%s",format,key,value);
  return(DefineImageOption(wand->image_info,option));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /* Deprecated alias: forward to MagickPaintTransparentImage(). */
  MagickBooleanType
    status;

  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  /* Deprecated alias: forward to MagickGetImageRegion(). */
  MagickWand
    *region;

  region=MagickGetImageRegion(wand,width,height,x,y);
  return(region);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The method returns MagickTrue on success otherwise
%  MagickFalse if an error is encountered.  The pixel data can be either char,
% short int, int, ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  /* Deprecated alias: forward to MagickImportImagePixels(). */
  MagickBooleanType
    status;

  status=MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
/*
  MagickWriteImageBlob(): return the image encoded as a blob together with
  its length.  Delegates to MagickGetImageBlob(); the caller frees the
  result with MagickRelinquishMemory().
*/
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  blob=MagickGetImageBlob(wand,length);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
const size_t number_threads)
{
PixelWand
***pixel_wands;
register ssize_t
i;
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
}
return(pixel_wands);
}
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  /*
    Allocate and initialize a pixel view spanning the full canvas of the
    wand's current image.  Allocation failures raise a fatal wand exception.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* The wand member is assigned before the cache view dereferences it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  /* The region covers the entire image. */
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  /* One row of pixel wands per OpenMP thread, each as wide as the region. */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixel_wands view.
%
*/
/*
  NewPixelViewRegion(): allocate and initialize a pixel view confined to the
  given region of the wand's current image.  Allocation failures raise a
  fatal wand exception.
*/
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: assign the wand member *before* acquiring the cache view.  The
    structure was zeroed above, so the previous order dereferenced
    pixel_view->wand->images through a NULL wand pointer (compare the
    correct ordering in NewPixelView()).
  */
  pixel_view->wand=wand;
  pixel_view->view=AcquireVirtualCacheView(pixel_view->wand->images,
    pixel_view->exception);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  /* One row of pixel wands per OpenMP thread, each as wide as the region. */
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
/*
  PixelGetNextRow(): return the next row of the pixel iterator as an array
  of pixel wands.  Delegates to PixelGetNextIteratorRow(); the wand count
  that the delegate computes is not exposed by this interface.
*/
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    discarded_wand_count;

  return(PixelGetNextIteratorRow(iterator,&discarded_wand_count));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
/*
  PixelIteratorGetException(): return the description of any error raised by
  the iterator, storing its severity in *severity.  Delegates to
  PixelGetIteratorException().
*/
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  char
    *description;

  description=PixelGetIteratorException(iterator,severity);
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension.  The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Iterate over each scanline of the view in parallel: the user callback
    fills this thread's row of pixel wands, which are then copied back to
    the image and synced.
  */
  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Pixels are written directly, so the image must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's row of wands */

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;  /* another scanline already failed; skip the rest */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Let the callback populate destination->pixel_wands[id][0..width). */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand colors back into the authentic pixel buffer. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% region is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination pixel view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/*
  TransferPixelViewIterator(): iterate over the source and destination views
  in parallel, invoking the user transfer callback once per scanline.  The
  source scanline is loaded into the source wands, the destination scanline
  into the destination wands; after the callback, the destination wands are
  written back and synced.  Returns MagickTrue on success.
*/
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written directly, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's rows of wands */

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;  /* another scanline already failed; skip the rest */
    /*
      Load the source scanline into this thread's source pixel wands.
    */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /*
      Load the destination scanline into this thread's destination wands.
    */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /*
      Bug fix: the destination wands must be seeded from the destination
      scanline (destination_pixels/destination_indexes).  Previously the
      source scanline's pixels/indexes were read here, which transferred the
      wrong data and could read past the source row when the destination
      region is wider than the source region.
    */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Write any callback edits back into the destination pixel buffer. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetPixelIndex(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /*
          Bug fix: the sync that failed is on the destination view, so
          inherit the exception from destination->view (not source->view).
        */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    For each scanline, in parallel: load the pixels into this thread's
    pixel wands, run the user update callback, then write the wands back
    and sync the cache view.
  */
  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's row of wands */

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    if (status == MagickFalse)
      continue;  /* another scanline already failed; skip the rest */
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Seed the wands with the current pixel values. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* Copy any callback edits back into the authentic pixel buffer. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetPixelIndex(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
#endif
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
/*
  AutoOrientImage(): return a copy of the image transformed so that its
  content reads in top-left orientation.  Each EXIF-style orientation maps
  to the flip/flop/rotate/transpose operation that undoes it; unrecognized
  or already-correct orientations produce a plain clone.  The returned
  image (if any) is tagged TopLeftOrientation.
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  switch (orientation)
  {
    case TopRightOrientation:
      orient_image=FlopImage(image,exception);
      break;
    case BottomRightOrientation:
      orient_image=RotateImage(image,180.0,exception);
      break;
    case BottomLeftOrientation:
      orient_image=FlipImage(image,exception);
      break;
    case LeftTopOrientation:
      orient_image=TransposeImage(image,exception);
      break;
    case RightTopOrientation:
      orient_image=RotateImage(image,90.0,exception);
      break;
    case RightBottomOrientation:
      orient_image=TransverseImage(image,exception);
      break;
    case LeftBottomOrientation:
      orient_image=RotateImage(image,270.0,exception);
      break;
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
      /* Nothing to undo: return an unmodified clone. */
      orient_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
  }
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info)
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /* Clamp the chop rectangle to the image bounds. */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /* The result image is the original minus the chopped width/height. */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Copy the rows above the chop region, skipping the chopped columns.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another scanline already failed; skip the rest */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Only columns outside [extent.x, extent.x+extent.width) survive. */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Copy the rows below the chop region, again skipping the chopped columns.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* another scanline already failed; skip the rest */
    /* Source rows start just below the chop region; destination rows at extent.y. */
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  /* Each consecutive group of 4 list entries yields one CMYK image. */
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    /* Channel i of the output comes from the i-th image of the group. */
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /* The plane's inverted intensity becomes the ink amount. */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      /* Advance to the next plane in the input list. */
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.  The region is interpreted relative to the virtual
    canvas (image->page); no geometry-flag handling is performed here.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      crop_image->alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Clamp the crop rectangle against the virtual canvas origin.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy each scanline of the region into the new image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Update and read the shared progress counter inside one critical
          section: the previous "#pragma omp atomic" made the increment
          atomic but left the subsequent read of progress racy.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image. The cropped tiles are returned as a new image list.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Clamp a floating-point pixel offset into the range that converts safely to
  ssize_t, keeping a 512-unit margin away from each extreme.
*/
static inline double ConstrainPixelOffset(double x)
{
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
/*
  Round a pixel offset to the nearest integer (exact halves round toward
  +infinity), constraining the value to a safe ssize_t range first.
*/
static inline ssize_t PixelRoundOffset(double x)
{
  double
    lower,
    upper;

  lower=floor(x);
  upper=ceil(x);
  if ((x-lower) < (upper-x))
    return((ssize_t) floor(ConstrainPixelOffset(x)));
  return((ssize_t) ceil(ConstrainPixelOffset(x)));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Parse the crop geometry string and dispatch on its flags: "@" crops into
    an NxM grid of tiles, an explicit +X+Y (or empty WxH) crops one region,
    and a bare WxH tiles the canvas with fixed-size tiles.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  geometry.width/height carry the tile
        counts; delta is the (fractional) tile size in pixels.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /* adjust the usable canvas by the +X+Y offset (sign-aware) */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double)
              (offset.y+(geometry.y < -1 ? geometry.y : 0)));
          }
        /* crop.height currently holds the tile's bottom edge; convert to a
           height, then translate into virtual-canvas coordinates */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /* per-tile CropImage warnings are expected here; discard them */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH, marching across the virtual
        canvas left-to-right, top-to-bottom.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* geometry covers the whole image: return an unmodified copy */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.  Unlike CropImage(), the geometry is taken
    literally (no virtual-canvas adjustment).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Update and read the shared progress counter inside one critical
          section; "#pragma omp atomic" alone left the read of progress racy.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate extent image: a background-colored canvas of the requested
    size with the source composited at the negated geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageBackgroundColor(extent_image,exception);
  if (status == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  status=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  /*
    Previously the CompositeImage() status was assigned but never checked,
    so a failed composite returned a half-initialized image.  Destroy the
    partial result instead; the exception carries the error details.
  */
  if (status == MagickFalse)
    extent_image=DestroyImage(extent_image);
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is written to destination row (rows-y-1),
    reflecting the pixels around the central x-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Update and read the shared progress counter inside one critical
          section; "#pragma omp atomic" alone left the read of progress racy.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* mirror the virtual canvas offset as well */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: q starts one past the row's last pixel and walks
    backwards while p walks forward, mirroring around the central y-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Update and read the shared progress counter inside one critical
          section; "#pragma omp atomic" alone left the read of progress racy.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* mirror the virtual canvas offset as well */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() transfers a columns-x-rows rectangle of pixels from
  (sx,sy) in source to (dx,dy) in destination, channel by channel.  Returns
  MagickFalse if any scanline could not be read or written.  Helper for
  RollImage().
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* a zero-width region is a no-op, not an error */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* copy only channels defined in both images */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /* normalize the offsets into [0, columns) and [0, rows) */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the circular shift is realized as four quadrant copies,
    each wrapping one corner of the source to the opposite corner of the
    destination.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    crop_region;

  /*
    Shave shave_info->width columns from the left and right edges and
    shave_info->height rows from the top and bottom by cropping the
    interior region of the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Crop what remains after removing the margins.
  */
  SetGeometry(image,&crop_region);
  crop_region.width-=2*shave_info->width;
  crop_region.height-=2*shave_info->height;
  crop_region.x=(ssize_t) shave_info->width+image->page.x;
  crop_region.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&crop_region,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas to account for the shaved margins.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
{
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
(IsGrayColorspace(splice_image->colorspace) != MagickFalse))
(void) SetImageColorspace(splice_image,sRGBColorspace,exception);
if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
(splice_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
(void) SetImageBackgroundColor(splice_image,exception);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.width/2;
break;
}
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t)splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SpliceImageTag,progress,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
% This function and should probably be deprecated in favor of direct calls
% to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
      /*
        Bug fix: if both the crop and the fallback clone failed,
        transform_image is NULL here; falling through would dereference it
        in ParseRegionGeometry() below.
      */
      if (transform_image == (Image *) NULL)
        return(MagickFalse);
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);  /* already the requested size; nothing to do */
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);  /* crop (if any) succeeded, resize failed */
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clone with rows and columns swapped: the transposed canvas.
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read source row (rows-y-1) and write it as destination column
      (rows-y-1): one row maps to one column of the transposed image.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the page geometry (width/height, x/y offsets) to match the
    rotated canvas.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clone with rows and columns swapped to hold the rotated result.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read source row y; write destination column (rows-y-1).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q starts one pixel past the end of the column and is decremented
      before each write, so the column is filled in reverse order.
    */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /*
        Copy only channels defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap page dimensions and mirror the page offsets so the virtual
    canvas placement matches the rotated/reflected pixels.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  /*
    Crop away constant-color edges: compute the bounding box and crop to it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box (entire image is one color): return a 1x1
    transparent canvas carrying the original page geometry.
  */
  {
    Image
      *trim_image;

    trim_image=CloneImage(image,1,1,MagickTrue,exception);
    if (trim_image == (Image *) NULL)
      return((Image *) NULL);
    trim_image->background_color.alpha=(MagickRealType) TransparentAlpha;
    trim_image->alpha_trait=BlendPixelTrait;
    (void) SetImageBackgroundColor(trim_image,exception);
    trim_image->page=image->page;
    trim_image->page.x=(-1);
    trim_image->page.y=(-1);
    return(trim_image);
  }
}
|
ompfor10.c | #include <math.h>
/*
  NOTE(review): this looks like a compiler test fixture, and several of the
  OpenMP clauses below appear intentionally invalid -- collapse(456) requires
  456 perfectly nested loops, ordered() takes a constant nesting depth rather
  than a variable name, and linear() on a worksharing loop may not name
  arbitrary variables like j.  The non-standard 'void main' signature also
  suggests a negative/robustness test; confirm before "fixing" this file.
*/
void main(int n, int m, float *a, float *b, float *y, float *z)
{
  int i;
  int j;
  #pragma omp parallel
  {
    /* first worksharing loop: 3-point average of a[] into b[] */
    #pragma omp for nowait linear(i,j) collapse(456) ordered(i)
    for (i=1; i<n; i++)
      b[i] = (a[i] + a[i-1]) / 2.0;
    /* second worksharing loop: copy z[] into y[]; schedule modifier
       'monotonic:runtime' is valid OpenMP 4.5+ syntax */
    #pragma omp for nowait ordered schedule ( monotonic : runtime )
    for (i=0; i<m; i++)
      y[i] = z[i];
  }
}
|
HW2.c | /*
* Write a serial program to check if a given number is prime or not.
* Report the required time.
*
* Convert it to a OpenMP code.
*/
#include <stdio.h>
#include <omp.h>
#include <time.h>
/*
 * Primality check by parallel trial division.  Fixes over the original:
 *  - argv[1] is validated before use (the original dereferenced it blindly);
 *  - the shared result flag is combined with an OpenMP reduction instead of
 *    racy unsynchronized writes from every thread;
 *  - the verdict is printed once, after the parallel region (the original
 *    printed it once per thread);
 *  - n < 2 is reported as not prime;
 *  - elapsed clock ticks are converted to milliseconds as the message claims.
 */
int main(int argc,char *argv[])
{
  clock_t start,stop;
  long int n,i;
  int composite=0;   /* set nonzero once any divisor of n is found */

  if (argc < 2) {
    fprintf(stderr,"usage: %s <number>\n",argv[0]);
    return 1;
  }
  if (sscanf(argv[1],"%ld",&n) != 1) {
    fprintf(stderr,"invalid number: %s\n",argv[1]);
    return 1;
  }
  start=clock();
  omp_set_num_threads(4);
  /* trial division; the reduction safely ORs each thread's findings */
#pragma omp parallel for reduction(|:composite)
  for (i=2;i<n;i++) {
    if (n%i==0)
      composite=1;
  }
  if (n < 2)
    composite=1;   /* 0 and 1 are not prime by definition */
  (composite)? printf("%ld is not prime!\n",n):printf("%ld is prime!\n",n);
  stop=clock();
  /* clock() counts in CLOCKS_PER_SEC units, not milliseconds */
  printf("Time required in milliseconds: %ld\n",
    (long)((stop-start)*1000/CLOCKS_PER_SEC));
  return 0;
}
|
jac_solv_simd.c | /*
** PROGRAM: jacobi Solver
**
** PURPOSE: This program will explore use of a jacobi iterative
** method to solve a system of linear equations (Ax= b).
**
** Here is the basic idea behind the method. Rewrite
** the matrix A as a Lower Triangular (L), upper triangular
** (U) and diagonal matrix (D)
**
** Ax = (L + D + U)x = b
**
** Carry out the multiplication and rearrange:
**
** Dx = b - (L+U)x --> x = (b-(L+U)x)/D
**
** We can do this iteratively
**
** x_new = (b-(L+U)x_old)/D
**
** USAGE: Run without arguments to use default SIZE.
**
** ./jac_solv
**
** Run with a single argument for the order of the A
** matrix ... for example
**
** ./jac_solv 2500
**
** HISTORY: Written by Tim Mattson, Oct 2015
*/
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#include "mm_utils.h" //a library of basic matrix utilities functions
//and some key constants used in this program
//(such as TYPE)
#define TOLERANCE 0.001
#define DEF_SIZE 1000
#define MAX_ITERS 5000
#define LARGE 1000000.0
//#define DEBUG 1 // output a small subset of intermediate values
//#define VERBOSE 1
/*
 * Jacobi iterative solver for Ax = b: x_new = (b - (L+U) x_old) / D.
 * Optional argv[1] gives the matrix order (default DEF_SIZE).
 * Fixes over the original: the final report printed sqrt(err) even though
 * err already held the 2-norm (a double square root); the DEBUG printf
 * labeled xold[i] as "computed b"; a non-positive Ndim is now rejected;
 * main returns 0 explicitly.
 */
int main(int argc, char **argv)
{
  int Ndim;                     // A[Ndim][Ndim]
  int i, j, iters;
  double start_time, elapsed_time;
  TYPE conv, tmp, err, chksum;
  TYPE *A, *b, *x1, *x2, *xnew, xo, xn, *xold, *xtmp;

  // set matrix dimensions and allocate memory for matrices;
  // fall back to DEF_SIZE on a missing or non-positive argument
  Ndim = (argc == 2) ? atoi(argv[1]) : DEF_SIZE;
  if (Ndim < 1)
    Ndim = DEF_SIZE;
  printf(" ndim = %d\n", Ndim);
  A  = (TYPE *) malloc(Ndim * Ndim * sizeof(TYPE));
  b  = (TYPE *) malloc(Ndim * sizeof(TYPE));
  x1 = (TYPE *) malloc(Ndim * sizeof(TYPE));
  x2 = (TYPE *) malloc(Ndim * sizeof(TYPE));
  if (!A || !b || !x1 || !x2) {
    printf("\n memory allocation error\n");
    exit(-1);
  }

  // generate our diagonally dominant matrix, A
  init_diag_dom_near_identity_matrix(Ndim, A);
#ifdef VERBOSE
  mm_print(Ndim, Ndim, A);
#endif
  //
  // Initialize x (both buffers) and give b some non-zero random values
  //
  for (i = 0; i < Ndim; i++) {
    x1[i] = (TYPE)0.0;
    x2[i] = (TYPE)0.0;
    b[i]  = (TYPE)(rand() % 51) / 100.0;
  }
  start_time = omp_get_wtime();
  //
  // jacobi iterative solver
  //
  conv  = LARGE;
  iters = 0;
  xnew  = x1;
  xold  = x2;
  while ((conv > TOLERANCE) && (iters < MAX_ITERS)) {
    iters++;
    xtmp = xnew;   // don't copy arrays.
    xnew = xold;   // just swap pointers.
    xold = xtmp;
    for (i = 0; i < Ndim; i++) {
      xn = (TYPE)0.0;
#pragma omp simd reduction(+:xn)
      for (j = 0; j < Ndim; j++) {
        // (i != j) masks out the diagonal term without a branch
        xn += A[i*Ndim + j] * xold[j] * (i != j);
      }
      xnew[i] = (b[i] - xn) / A[i*Ndim + i];
    }
    //
    // test convergence: conv = || x_new - x_old ||_2
    //
    conv = (TYPE)0.0;
#pragma omp simd reduction(+:conv)
    for (i = 0; i < Ndim; i++) {
      tmp = xnew[i] - xold[i];
      conv += tmp * tmp;
    }
    conv = sqrt((double)conv);
#ifdef DEBUG
    printf(" conv = %f \n", (float)conv);
#endif
  }
  elapsed_time = omp_get_wtime() - start_time;
  printf(" Convergence = %g with %d iterations and %f seconds\n",
         (float)conv, iters, (float)elapsed_time);
  //
  // test answer by multiplying my computed value of x by
  // the input A matrix and comparing the result with the
  // input b vector.
  //
  err    = (TYPE)0.0;
  chksum = (TYPE)0.0;
  for (i = 0; i < Ndim; i++) {
    xo = (TYPE)0.0;
#pragma omp simd reduction(+:xo)
    for (j = 0; j < Ndim; j++)
      xo += A[i*Ndim + j] * xnew[j];
    tmp = xo - b[i];
#ifdef DEBUG
    printf(" i=%d, diff = %f, computed b = %f, input b= %f \n",
           i, (float)tmp, (float)xo, (float)b[i]);
#endif
    chksum += xnew[i];
    err += tmp * tmp;
  }
  err = sqrt((double)err);
  // err already holds the 2-norm; print it directly (the old code took
  // sqrt again, reporting the fourth root of the summed squares)
  printf("jacobi solver: err = %f, solution checksum = %f \n",
         (float)err, (float)chksum);
  free(A);
  free(b);
  free(x1);
  free(x2);
  return 0;
}
|
GB_ijsort.c | //------------------------------------------------------------------------------
// GB_ijsort: sort an index array I and remove duplicates
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Sort an index array and remove duplicates. In MATLAB notation:
/*
[I1 I1k] = sort (I) ;
Iduplicate = [(I1 (1:end-1) == I1 (2:end)), false] ;
I2 = I1 (~Iduplicate) ;
I2k = I1k (~Iduplicate) ;
*/
#include "GB_ij.h"
#include "GB_sort.h"
#define GB_FREE_WORK \
{ \
GB_FREE_WERK (&Work, Work_size) ; \
}
GrB_Info GB_ijsort
(
    const GrB_Index *restrict I,    // size ni, where ni > 1 always holds
    int64_t *restrict p_ni,         // input: size of I; output: # of indices
                                    // in I2
    GrB_Index *restrict *p_I2,      // size ni2, where I2 [0..ni2-1]
        // contains the sorted indices with duplicates removed.
    size_t *I2_size_handle,         // output: allocated size of I2
                                    // (as reported by GB_MALLOC_WERK)
    GrB_Index *restrict *p_I2k,     // output array of size ni2
    size_t *I2k_size_handle,        // output: allocated size of I2k
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (I != NULL) ;
    ASSERT (p_ni != NULL) ;
    ASSERT (p_I2 != NULL) ;
    ASSERT (p_I2k != NULL) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Index *Work = NULL ; size_t Work_size = 0 ;
    GrB_Index *restrict I2 = NULL ; size_t I2_size = 0 ;
    GrB_Index *restrict I2k = NULL ; size_t I2k_size = 0 ;
    int64_t ni = *p_ni ;
    ASSERT (ni > 1) ;
    int ntasks = 0 ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (ni, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // determine number of tasks to create
    //--------------------------------------------------------------------------

    // oversubscribe 32 tasks per thread for load balance, but never more
    // tasks than entries, and at least one task
    ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
    ntasks = GB_IMIN (ntasks, ni) ;
    ntasks = GB_IMAX (ntasks, 1) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // single workspace block laid out as I1 (ni), I1k (ni), Count (ntasks+1)
    Work = GB_MALLOC_WERK (2*ni + ntasks + 1, GrB_Index, &Work_size) ;
    if (Work == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }
    GrB_Index *restrict I1 = Work ;         // size ni
    GrB_Index *restrict I1k = Work + ni ;   // size ni
    int64_t *restrict Count = (int64_t *) (Work + 2*ni) ;   // size ntasks+1

    //--------------------------------------------------------------------------
    // copy I into I1 and construct I1k
    //--------------------------------------------------------------------------

    GB_memcpy (I1, I, ni * sizeof (GrB_Index), nthreads) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < ni ; k++)
    {
        // the key is selected so that the last duplicate entry comes first in
        // the sorted result.  It must be adjusted later, so that the kth entry
        // has a key equal to k.
        I1k [k] = (ni-k) ;
    }

    //--------------------------------------------------------------------------
    // sort [I1 I1k]
    //--------------------------------------------------------------------------

    info = GB_msort_2b ((int64_t *) I1, (int64_t *) I1k, ni, nthreads) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // count unique entries in I1
    //--------------------------------------------------------------------------

    // each task counts the entries in its slice that differ from their
    // predecessor; task 0 also counts I1 [0], which is never a duplicate
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t kfirst, klast, my_count = (tid == 0) ? 1 : 0 ;
        GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
        for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
        {
            if (I1 [k-1] != I1 [k])
            {
                my_count++ ;
            }
        }
        Count [tid] = my_count ;
    }

    // cumulative sum: Count [tid] becomes task tid's output offset into I2,
    // and Count [ntasks] is the total # of unique indices
    GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
    int64_t ni2 = Count [ntasks] ;

    //--------------------------------------------------------------------------
    // allocate the result I2
    //--------------------------------------------------------------------------

    I2  = GB_MALLOC_WERK (ni2, GrB_Index, &I2_size) ;
    I2k = GB_MALLOC_WERK (ni2, GrB_Index, &I2k_size) ;
    if (I2 == NULL || I2k == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        GB_FREE_WERK (&I2, I2_size) ;
        GB_FREE_WERK (&I2k, I2k_size) ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // construct the new list I2 from I1, removing duplicates
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t kfirst, klast, k2 = Count [tid] ;
        GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
        if (tid == 0)
        {
            // the first entry in I1 is never a duplicate
            I2 [k2] = I1 [0] ;
            I2k [k2] = (ni - I1k [0]) ;
            k2++ ;
        }
        for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
        {
            if (I1 [k-1] != I1 [k])
            {
                // keys were stored as (ni-k), so (ni - I1k [k]) recovers the
                // original position of this index in I
                I2 [k2] = I1 [k] ;
                I2k [k2] = ni - I1k [k] ;
                k2++ ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result: compare with single-pass, single-threaded algorithm
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    {
        // recompute the dedup in place in I1/I1k and compare with I2/I2k
        int64_t ni1 = 1 ;
        I1k [0] = ni - I1k [0] ;
        for (int64_t k = 1 ; k < ni ; k++)
        {
            if (I1 [ni1-1] != I1 [k])
            {
                I1 [ni1] = I1 [k] ;
                I1k [ni1] = ni - I1k [k] ;
                ni1++ ;
            }
        }
        ASSERT (ni1 == ni2) ;
        for (int64_t k = 0 ; k < ni1 ; k++)
        {
            ASSERT (I1 [k] == I2 [k]) ;
            ASSERT (I1k [k] == I2k [k]) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // free workspace and return the new sorted list
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    *(p_I2 ) = (GrB_Index *) I2 ; (*I2_size_handle ) = I2_size ;
    *(p_I2k) = (GrB_Index *) I2k ; (*I2k_size_handle) = I2k_size ;
    *(p_ni ) = (int64_t ) ni2 ;
    return (GrB_SUCCESS) ;
}
|
declare_reduction_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix=CHECK-LOAD %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes -fopenmp-version=45
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes -fopenmp-version=45 | FileCheck --check-prefixes=CHECK-LOAD,OMP45-LOAD %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp-simd -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK: [[SSS_INT:.+]] = type { i32 }
// CHECK-LOAD: [[SSS_INT:.+]] = type { i32 }
// CHECK-DAG: [[SSS_INIT:@.+]] = private constant %struct.SSS zeroinitializer
// CHECK-DAG: [[INT_INIT:@.+]] = private constant i32 0
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias %0, i32* noalias %1)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias %0, i32* noalias %1)
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: store i32 [[MUL]], i32*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias %0, i8* noalias %1)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias %0, i8* noalias %1)
// CHECK-LOAD: sext i8
// CHECK-LOAD: sext i8
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig)
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias %0, float* noalias %1)
// CHECK: [[ADD:%.+]] = fadd float
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias %0, float* noalias %1)
// CHECK: [[ADD:%.+]] = fadd float 1.5
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias %0, float* noalias %1)
// CHECK-LOAD: [[ADD:%.+]] = fadd float
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias %0, float* noalias %1)
// CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// Exercises '#pragma omp declare reduction' at struct scope; the CHECK
// lines below are FileCheck patterns for the combiner functions Clang
// emits for the int and char variants (left byte-identical on purpose).
struct SSS {
  int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias %0, i32* noalias %1)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias %0, i8* noalias %1)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
};
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LABEL: @main
// CHECK-LOAD-LABEL: @main
int main() {
// Re-declares the 'fun' reduction for struct SSS at function scope and again
// in a nested block scope; each declaration emits its own combiner/initializer
// pair, which the CHECK/CHECK-LOAD FileCheck directives below pin down.
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
{
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias %0, [[SSS_INT]]* noalias %1)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
}
return 0;
}
// OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias %0, i32* noalias %1)
// OMP45-LOAD: [[MUL:%.+]] = mul nsw i32
// OMP45-LOAD-NEXT: store i32 [[MUL]], i32*
// OMP45-LOAD-NEXT: ret void
// OMP45-LOAD-NEXT: }
// OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias %0, i8* noalias %1)
// OMP45-LOAD: sext i8
// OMP45-LOAD: sext i8
// OMP45-LOAD: [[MUL:%.+]] = mul nsw i32
// OMP45-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// OMP45-LOAD-NEXT: store i8 [[TRUNC]], i8*
// OMP45-LOAD-NEXT: ret void
// OMP45-LOAD-NEXT: }
// CHECK-LABEL: bar
struct SSS ss;
int in;
void bar() {
// Uses function-scope '+' declare-reductions for struct SSS and int in an
// 'omp for reduction' over the globals 'ss' and 'in'; the CHECK lines pin
// the per-thread private copies and their initialization from the globals.
// CHECK: [[SS_PRIV:%.+]] = alloca %struct.SSS,
// CHECK: [[IN_PRIV:%.+]] = alloca i32,
// CHECK: [[BC:%.+]] = bitcast %struct.SSS* [[SS_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{64|32}}(i8* {{.*}}[[BC]], i8* {{.*}}bitcast (%struct.SSS* [[SSS_INIT]] to i8*), i{{64|32}} 4, i1 false)
// CHECK: [[IN_VAL:%.+]] = load i32, i32* [[INT_INIT]],
// CHECK: store i32 [[IN_VAL]], i32* [[IN_PRIV]],
// CHECK: call void @__kmpc_for_static_init_4(
#pragma omp declare reduction(+ \
: struct SSS \
: omp_out = omp_in)
#pragma omp declare reduction(+ \
: int \
: omp_out = omp_in)
#pragma omp for reduction(+ \
: ss, in)
for (int i = 0; i < 10; ++i)
;
}
#endif
|
reduction_array2.h | // =============================================================================
// == reduction_array2.h
// == --------------------------------------------------------------------------
// == A reduction array class that can be used with OpenMP. Current OpenMP
// == software only allows the reduction of an array into a scalar. This class
// == allows one to reduce an array into an array. Also, since the code here
// == implements a reduction similar to OpenMP without actually using OpenMP's
// == reduction clause, the reduction_array2 class allows users to define their
// == own reduction functions and reductions on user-specified classes.
// ==
// == An array of size numThreads x D is created. Reduction is performed so
// == that each thread only has write accesses a unique spot in the array.
// ==
// == Notation is as follows:
// == numThreads - The number of OpenMP threads that will be running
// == D - The dimension of the array to reduce to (if scalar, should be 1)
// ==
// == General usage:
// == (1) Before the parallel environment, initialize the reduction array:
// == >> reduction_array2<double> my_arr(numThreads,D,initial_value);
// ==
// == (2) Inside the parallel for loop, you can reduce with data x with:
// == >> my_arr.reduce_XXX(omp_get_thread_num(), bin, x);
// == "XXX" can be the predefined "add", "multiply", "max", or "min"
// == or you can specify a function pointer to your own function with
// == >> my_arr.reduce_function(omp_get_thread_num(), bin, x, func_ptr);
// ==
// == (3) After the parallel environment, reduce on the separate threads:
// == >> double* output = my_arr.final_reduce_XXX();
// == Again, "XXX" can be any of the predefined functions, or you can use
// == >> double* output = my_arr.final_reduce_function(func_ptr);
// ==
// == (4) output now is an array of length D with the reduced values.
// == Do *NOT* attempt to deallocate output or my_arr as they have their
// == own destructors.
// ==
// == Notes:
// == (1) Because of possible "false sharing", the array size here is actually
// == numThreads x D x cache_line. We pad the array so that different
// == threads will not access the same cache_line. If the cache line is
// == not large enough, please increase it manually.
// == --------------------------------------------------------------------------
// == Written by Jason Chang 04-14-2013 - jchang7@csail.mit.edu
// =============================================================================
#ifndef _REDUCTION_ARRAY2
#define _REDUCTION_ARRAY2
#include <string.h>
#include <cstdlib>
#include <stdlib.h>
#include <iostream>
#include <algorithm>
#include "dpmmSubclusters/reduction_array.h"
//#include "array.h"
//#include "helperMEX.h"
#ifndef cache_line
#define cache_line 4
#endif
template <typename T>
class reduction_array2
{
private:
// Backing buffer of numThreads*offt elements (padded per-bin by cache_line
// to avoid false sharing). Declared first: initializer lists should list it
// before the scalar members to match declaration order.
arr(T) data;
int numThreads;
int K;
int D;
long offt; // offsets for the threads
long offk; // offsets for the dimensions
public:
// --------------------------------------------------------------------------
// -- reduction_array2
// -- constructors; initializes the reduction array with a number of
// -- threads, each with a KxD dimensional vector. The third parameter can be
// -- specified to give the initializing value.
// --------------------------------------------------------------------------
reduction_array2();
reduction_array2(int thenumThreads, int theK, int theD);
reduction_array2(int thenumThreads, int theK, int theD, T value);
virtual ~reduction_array2();
// --------------------------------------------------------------------------
// -- init_values
// -- Initializes all values in the reduction array to "value"
// --
// -- parameters:
// -- - value : the value to set all elements to
// --------------------------------------------------------------------------
void init_values(T value);
// --------------------------------------------------------------------------
// -- reduce_XXX
// -- Performs the reduction "XXX" on the d^th dimension with value
// -- t is the calling thread's index, k selects the bin.
// --------------------------------------------------------------------------
void reduce_inc(int t, int k);
void reduce_add(int t, int k, arr(T) value);
template <typename T2>
void reduce_add(int t, int k, long d, T2 value);
template <typename T2>
void reduce_add(int t, int k, long* ds, T2* values, int nnz);
void reduce_add_outerprod(int t, int k, arr(T) value);
void reduce_multiply(int t, int k, arr(T) value);
void reduce_min(int t, int k, arr(T) value);
void reduce_max(int t, int k, arr(T) value);
// --------------------------------------------------------------------------
// -- reduce_function
// -- Performs the reduction specified by the function pointer
// --------------------------------------------------------------------------
void reduce_function(int t, int k, arr(T) value, T (*func)(T,T));
// --------------------------------------------------------------------------
// -- final_reduce_XXX
// -- Performs the reduction "XXX" on the threads and returns result
// -- (a view into the internal buffer; the caller must NOT free it).
// --------------------------------------------------------------------------
arr(T) final_reduce_add();
arr(T) final_reduce_multiply();
arr(T) final_reduce_min();
arr(T) final_reduce_max();
// --------------------------------------------------------------------------
// -- final_reduce_function
// -- The function pointer version of above
// --------------------------------------------------------------------------
arr(T) final_reduce_function(T (*func)(T,T));
// --------------------------------------------------------------------------
// -- final_reduce_ext_XXX
// -- Performs the reduction "XXX" on the threads into the external array.
// -- Assumes that ext is already allocated to the correct size.
// --------------------------------------------------------------------------
void final_reduce_ext_add(arr(T) ext);
void final_reduce_ext_multiply(arr(T) ext);
// --------------------------------------------------------------------------
// -- collapse_cache_line
// -- collapses the cache line for the final return
// --------------------------------------------------------------------------
void collapse_cache_line();
};
// --------------------------------------------------------------------------
// -- reduction_array2
// -- constructors; initializes the reduction array with a number of
// -- threads, each with a KxD dimensional vector. The third parameter can be
// -- specified to give the initializing value.
// --------------------------------------------------------------------------
// Default constructor: creates an empty reduction array. Members are listed
// in declaration order (data first), and offt/offk are zeroed as well so a
// default-constructed object carries no indeterminate offsets.
template <typename T>
reduction_array2<T>::reduction_array2() :
data(NULL), numThreads(0), K(0), D(0), offt(0), offk(0)
{
}
// Sizing constructor: allocates numThreads bins of K rows, each row padded
// from D to D+cache_line elements to keep threads on separate cache lines.
// Contents are left uninitialized; call init_values() before reducing.
template <typename T>
reduction_array2<T>::reduction_array2(int thenumThreads, int theK, int theD) :
numThreads(thenumThreads), K(theK), D(theD)
{
offk = D + cache_line;
offt = offk*K;
data = allocate_memory<T>(numThreads*offt);
}
// Sizing constructor with an initial value: same layout as above, then fills
// the whole buffer with 'value' via set_memory.
// NOTE(review): the third argument to set_memory is sizeof(T)*count here but
// element count in the templated call elsewhere is ambiguous from this file --
// confirm against reduction_array.h that bytes are expected.
template <typename T>
reduction_array2<T>::reduction_array2(int thenumThreads, int theK, int theD, T value) :
numThreads(thenumThreads), K(theK), D(theD)
{
offk = D + cache_line;
offt = offk*K;
data = allocate_memory<T>(numThreads*offt);
set_memory<T>(data, value, sizeof(T)*numThreads*offt);
}
// Destructor: releases the backing buffer if one was allocated.
// (Removed a stray duplicate semicolon after the deallocate_memory call.)
template <typename T>
reduction_array2<T>::~reduction_array2()
{
if (data!=NULL)
deallocate_memory(data);
}
// --------------------------------------------------------------------------
// -- init_values
// -- Initializes all values in the reduction array to "value"
// -- (data must already be allocated by a sizing constructor).
// --
// -- parameters:
// -- - value : the value to set all elements to
// --------------------------------------------------------------------------
template <typename T>
void reduction_array2<T>::init_values(T value)
{
set_memory<T>(data, value, sizeof(T)*numThreads*offt);
}
// --------------------------------------------------------------------------
// -- reduce_XXX
// -- Performs the reduction "XXX" on the d^th dimension with value
// --------------------------------------------------------------------------
// Increments every one of the D entries of bin k owned by thread t.
template <typename T>
inline void reduction_array2<T>::reduce_inc(int t, int k)
{
const long base = t*offt + k*offk;
for (int dim = 0; dim < D; ++dim)
data[base + dim] += 1;
}
// Element-wise adds the D-vector 'value' into bin k owned by thread t.
template <typename T>
inline void reduction_array2<T>::reduce_add(int t, int k, arr(T) value)
{
const long base = t*offt + k*offk;
for (int dim = 0; dim < D; ++dim)
data[base + dim] += value[dim];
}
// Adds a single value into dimension d of bin k owned by thread t
// (T2 is converted to T by the +=).
template <typename T>
template <typename T2>
inline void reduction_array2<T>::reduce_add(int t, int k, long d, T2 value)
{
long offset = t*offt + k*offk;
data[offset+d] += value;
}
// Sparse add: for each of the nnz (index, value) pairs, adds values[di]
// into dimension ds[di] of bin k owned by thread t.
template <typename T>
template <typename T2>
inline void reduction_array2<T>::reduce_add(int t, int k, long* ds, T2* values, int nnz)
{
long offset = t*offt + k*offk;
for (int di=0; di<nnz; di++)
data[offset+ds[di]] += values[di];
}
// Adds the flattened outer product value*value^T into bin k owned by thread t.
// NOTE(review): assumes D is a perfect square (sqrtD = sqrt(D) truncated to
// long) -- confirm callers guarantee this; also relies on a transitively
// included math header for sqrt().
template <typename T>
inline void reduction_array2<T>::reduce_add_outerprod(int t, int k, arr(T) value)
{
long offset = t*offt + k*offk;
long sqrtD = sqrt(D);
for (int d=0; d<D; d++)
data[offset+d] += value[d/sqrtD]*value[d%sqrtD];
}
// Element-wise multiplies bin k owned by thread t by the D-vector 'value'.
template <typename T>
inline void reduction_array2<T>::reduce_multiply(int t, int k, arr(T) value)
{
const long base = t*offt + k*offk;
for (int dim = 0; dim < D; ++dim)
data[base + dim] *= value[dim];
}
// Element-wise minimum (via mymin) of bin k owned by thread t with 'value'.
template <typename T>
inline void reduction_array2<T>::reduce_min(int t, int k, arr(T) value)
{
long offset = t*offt + k*offk;
for (int d=0; d<D; d++)
data[offset+d] = mymin(data[offset+d], value[d]);
}
// Element-wise maximum (via mymax) of bin k owned by thread t with 'value'.
template <typename T>
inline void reduction_array2<T>::reduce_max(int t, int k, arr(T) value)
{
long offset = t*offt + k*offk;
for (int d=0; d<D; d++)
data[offset+d] = mymax(data[offset+d], value[d]);
}
// --------------------------------------------------------------------------
// -- reduce_function
// -- Performs the reduction specified by the function pointer
// --------------------------------------------------------------------------
// Element-wise reduction of bin k owned by thread t with 'value', using the
// caller-supplied binary function (current element is the first argument).
template <typename T>
inline void reduction_array2<T>::reduce_function(int t, int k, arr(T) value, T (*func)(T,T))
{
long offset = t*offt + k*offk;
for (int d=0; d<D; d++)
data[offset+d] = func(data[offset+d], value[d]);
}
// --------------------------------------------------------------------------
// -- final_reduce_XXX
// -- Performs the reduction "XXX" on the threads and returns result
// --------------------------------------------------------------------------
// Folds every thread's copy into thread 0's copy by addition, collapses the
// cache-line padding, and returns the internal (now dense K*D) buffer; the
// caller must not free it.
// NOTE(review): unlike the other final_reduce_* variants this one nests k
// outside t and OpenMP-parallelizes the innermost D loop -- confirm this is
// intentional (for small D the fork/join overhead may dominate).
template <typename T>
inline arr(T) reduction_array2<T>::final_reduce_add()
{
for (int k=0; k<K; k++)
for (int t=1; t<numThreads; t++)
{
long offset0 = k*offk;
long offset1 = t*offt + k*offk;
#pragma omp parallel for
for (int d=0; d<D; d++)
data[offset0+d] += data[offset1+d];
}
collapse_cache_line();
return data;
}
// Folds every thread's copy into thread 0's copy by element-wise
// multiplication, collapses the padding, and returns the internal dense
// K*D buffer (caller must not free it).
template <typename T>
inline arr(T) reduction_array2<T>::final_reduce_multiply()
{
for (int thr = 1; thr < numThreads; ++thr)
{
for (int bin = 0; bin < K; ++bin)
{
const long dst = bin*offk;
const long src = thr*offt + bin*offk;
for (int dim = 0; dim < D; ++dim)
data[dst + dim] *= data[src + dim];
}
}
collapse_cache_line();
return data;
}
// Folds every thread's copy into thread 0's copy by element-wise mymin,
// collapses the padding, and returns the internal dense K*D buffer.
template <typename T>
inline arr(T) reduction_array2<T>::final_reduce_min()
{
for (int t=1; t<numThreads; t++)
for (int k=0; k<K; k++)
{
long offset0 = k*offk;
long offset1 = t*offt + k*offk;
for (int d=0; d<D; d++)
data[offset0+d] = mymin(data[offset0+d],data[offset1+d]);
}
collapse_cache_line();
return data;
}
// Folds every thread's copy into thread 0's copy by element-wise mymax,
// collapses the padding, and returns the internal dense K*D buffer.
template <typename T>
inline arr(T) reduction_array2<T>::final_reduce_max()
{
for (int t=1; t<numThreads; t++)
for (int k=0; k<K; k++)
{
long offset0 = k*offk;
long offset1 = t*offt + k*offk;
for (int d=0; d<D; d++)
data[offset0+d] = mymax(data[offset0+d],data[offset1+d]);
}
collapse_cache_line();
return data;
}
// --------------------------------------------------------------------------
// -- final_reduce_function
// -- The function pointer version of above
// --------------------------------------------------------------------------
// Folds every thread's copy into thread 0's copy with the caller-supplied
// binary function, collapses the padding, and returns the internal buffer.
template <typename T>
inline arr(T) reduction_array2<T>::final_reduce_function(T (*func)(T,T))
{
for (int t=1; t<numThreads; t++)
for (int k=0; k<K; k++)
{
long offset0 = k*offk;
long offset1 = t*offt + k*offk;
for (int d=0; d<D; d++)
data[offset0+d] = func(data[offset0+d],data[offset1+d]);
}
collapse_cache_line();
return data;
}
// --------------------------------------------------------------------------
// -- collapse_cache_line
// -- collapses the cache line for the final return
// --------------------------------------------------------------------------
// Repacks thread 0's K x D data contiguously: each k-row is moved from its
// padded offset (k*offk, with offk = D + cache_line) to its dense offset
// (k*D), so callers see a plain K*D array. The k=0 row is already in place.
template <typename T>
inline void reduction_array2<T>::collapse_cache_line()
{
// shift each padded row down to its dense position (required because
// offk = D + cache_line > D)
for (int k=1; k<K; k++)
{
long offsetOld = k*offk;
long offsetNew = k*D;
for (int d=0; d<D; d++)
data[offsetNew+d] = data[offsetOld+d];
}
}
// --------------------------------------------------------------------------
// -- final_reduce_ext_XXX
// -- Performs the reduction "XXX" on the threads into the external array.
// -- Assumes that ext is already allocated to the correct size.
// --------------------------------------------------------------------------
// Sums every thread's K x D data into the caller-supplied dense array 'ext'
// (which is first zeroed); 'ext' must already hold K*D elements.
template <typename T>
inline void reduction_array2<T>::final_reduce_ext_add(arr(T) ext)
{
set_memory<T>(ext, 0, sizeof(T)*K*D);
for (int t=0; t<numThreads; t++)
for (int k=0; k<K; k++)
{
long offset0 = k*D;
long offset1 = t*offt + k*offk;
for (int d=0; d<D; d++)
ext[offset0+d] += data[offset1+d];
}
}
// Multiplies every thread's K x D data into the caller-supplied dense array
// 'ext' (first filled with 1); 'ext' must already hold K*D elements.
template <typename T>
inline void reduction_array2<T>::final_reduce_ext_multiply(arr(T) ext)
{
set_memory<T>(ext, 1, sizeof(T)*K*D);
for (int t=0; t<numThreads; t++)
for (int k=0; k<K; k++)
{
long offset0 = k*D;
long offset1 = t*offt + k*offk;
for (int d=0; d<D; d++)
ext[offset0+d] *= data[offset1+d];
}
}
#endif
|
uccsd_t.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <assert.h>
/* One (a,b,c) virtual-orbital work item: six cached tensor slices (one per
 * permutation of a,b,c) plus the triple indices themselves. */
typedef struct {
void *cache[6];
short a;
short b;
short c;
short _padding;
} CacheJob;
/* Linear-index helpers into packed single/double/triple excitation arrays;
 * the 'c' suffixed variants are the compact (strictly ordered) forms.
 * Definitions live elsewhere except the inline compositions below. */
int Sc (int i, int a, int nocc);
int Dc (int i, int j, int a, int b, int nocc2);
int Tc (int i, int j, int k, int a, int b, int c, int nocc3);
int DSc(int i, int j, int k, int a, int b, int c, int nocc, int nvir, int nocc2);
/* SDc = single block index scaled past the doubles block, plus doubles index */
int SDc(int i, int j, int k, int a, int b, int c, int nocc, int nvir, int nocc2)
{ return Sc(i, a, nocc) * nocc2 * nvir*(nvir-1)/2 + Dc(j, k, b, c, nocc2); }
int S(int i, int a, int nvir);
int D(int i, int j, int a, int b, int nocc, int nvir);
size_t T(int i, int j, int k, int a, int b, int c, int nocc, int nvir);
/* SD = full (non-compact) analogue of SDc */
int SD(int i, int j, int k, int a, int b, int c, int nocc, int nvir)
{ return S(i, a, nvir) * nocc * nocc * nvir * nvir + D(j, k, b, c, nocc, nvir); }
double _ccsd_t_get_energy(double *w, double *v, double *mo_energy, int nocc,
int a, int b, int c, double fac);
size_t _ccsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
int a0, int a1, int b0, int b1,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b, size_t stride);
void _make_permute_indices(int *idx, int n);
double _ccsd_t_zget_energy(double complex *w, double complex *v,
double *mo_energy, int nocc,
int a, int b, int c, double fac);
/* qsort comparator for ints (ascending). Uses a subtraction-free comparison
 * so the result cannot overflow for extreme operands (e.g. INT_MIN vs
 * INT_MAX, where 'a - b' would be undefined behavior). */
int comparator (const void * p1, const void * p2)
{
    int a = *(const int *)p1;
    int b = *(const int *)p2;
    return (a > b) - (a < b);
}
/* Perturbative (T) energy for one (a,b,c) virtual triple, with active-space
 * exclusion weights: each w*v/denominator term is scaled by the paaa weight
 * looked up at the sorted occupied/virtual indices (shifted by nocc_iact).
 * NOTE(review): unlike get_wv_ec below, there is no 'it != kt' check and no
 * else-branch resetting 'exclude' to 1.0, so the previous iteration's weight
 * is reused ("sticky") whenever the condition fails -- confirm intentional. */
double _ecccsd_t_get_energy(double *w, double *v, double *mo_energy, double *paaa, int nocc, int nocc3, int nocc_iact,
int a, int b, int c, double fac)
{
int i, j, k, n;
int it, jt, kt, at, bt, ct;
double abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
double et = 0;
double exclude = 1.0;
/* sort (a,b,c) ascending so the compact Tc index sees ordered virtuals */
int arr[3] = {a, b, c};
qsort(arr, 3, sizeof(int), comparator);
at = arr[0];
bt = arr[1];
ct = arr[2];
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
arr[0]=i;
arr[1]=j;
arr[2]=k;
qsort(arr, 3, sizeof(int), comparator);
it = arr[0]-nocc_iact;
jt = arr[1]-nocc_iact;
kt = arr[2]-nocc_iact;
if ( it != jt && jt != kt ) exclude = paaa[Tc(it, jt, kt, at, bt, ct, nocc3)];
et += fac * w[n] * v[n] / (mo_energy[i] + mo_energy[j] + mo_energy[k] - abc) * exclude;
} } }
return et;
}
/*
* w + w.transpose(1,2,0) + w.transpose(2,0,1)
* - w.transpose(2,1,0) - w.transpose(0,2,1) - w.transpose(1,0,2)
*/
static void add_and_permute(double *out, double *w, double *v, int n)
{
int nn = n * n;
int nnn = nn * n;
int i, j, k;
for (i = 0; i < nnn; i++) {
v[i] += w[i];
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++) {
out[i*nn+j*n+k] = v[i*nn+j*n+k] + v[j*nn+k*n+i] + v[k*nn+i*n+j]
- v[k*nn+j*n+i] - v[i*nn+k*n+j] - v[j*nn+i*n+k];
} } }
}
/*
* t2T = t2.transpose(2,3,0,1)
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', -ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[b,c])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c])
* v+= w
*/
/* Accumulates the (T) intermediates for one permutation (a,b,c); see the
 * einsum sketch in the comment block above. The two dgemm calls (with -1
 * prefactors) build the w contribution into 'cache', then the scatter loop
 * adds it into w through the caller-chosen index permutation 'idx' and
 * accumulates v from oo*t1 and t2*fvohalf terms. */
static void get_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1T, double *t2T,
int nocc, int nvir, int a, int b, int c, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 =-1;
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2T;
/* cache = -ov . t2T[c] */
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
/* cache -= vooo[a] . t2T[b,c] */
dgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc,
&DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo,
&D1, cache, &nocc);
pt2T = t2T + a * nvoo + b * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
////full p mat version
//static void get_wv_ec(double *w, double *v, double *cache,
// double *fvohalf, double *vooo,
// double *vv_op, double *t1T, double *t2T,
// double *paaa, int nocc, int nocc3, int nocc_iact,
// int nvir, int a, int b, int c, int *idx)
//{
// const double D0 = 0;
// const double D1 = 1;
// const double DN1 =-1;
// const char TRANS_N = 'N';
// const char TRANS_T = 'T';
// const int nmo = nocc + nvir;
// const int noo = nocc * nocc;
// const size_t nooo = nocc * noo;
// const size_t nvoo = nvir * noo;
// int i, j, k, n;
// double *pt2T;
//
// dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
// &DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
// &D0, cache, &noo);
// dgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc,
// &DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo,
// &D1, cache, &nocc);
//
// pt2T = t2T + a * nvoo + b * noo;
// for (n = 0, i = 0; i < nocc; i++) {
// for (j = 0; j < nocc; j++) {
// for (k = 0; k < nocc; k++, n++) {
// w[idx[n]] += cache[n] * paaa[T(i-nocc_iact, j-nocc_iact, k-nocc_iact, a, b, c, nocc, nvir)];
// v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
// + pt2T[i*nocc+j] * fvohalf[c*nocc+k]) * paaa[T(i-nocc_iact, j-nocc_iact, k-nocc_iact, a, b, c, nocc, nvir)];
// } } }
//}
// off diag version
/* Active-space ("ec") variant of get_wv: same dgemm contractions, but every
 * scattered contribution is scaled by an exclusion weight from paaa. The
 * weight is looked up at the sorted (it,jt,kt,at,bt,ct) compact index only
 * when all shifted occupied indices are non-negative, all six indices are
 * pairwise distinct within their group, and the virtuals lie inside the CAS
 * window (< nvir_cas); otherwise the weight is reset to 1.0. */
static void get_wv_ec(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1T, double *t2T,
double *paaa, int nocc, int nocc3, int nocc_iact,
int nvir, int nvir_cas, int a, int b, int c, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 =-1;
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2T;
int it, jt, kt, at, bt, ct;
double exclude = 1.0;
/* sort (a,b,c) ascending for the compact Tc lookup */
int arr[3] = {a, b, c};
qsort(arr, 3, sizeof(int), comparator);
at = arr[0];
bt = arr[1];
ct = arr[2];
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
dgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc,
&DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo,
&D1, cache, &nocc);
pt2T = t2T + a * nvoo + b * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
arr[0]=i;
arr[1]=j;
arr[2]=k;
qsort(arr, 3, sizeof(int), comparator);
it = arr[0]-nocc_iact;
jt = arr[1]-nocc_iact;
kt = arr[2]-nocc_iact;
if ( it >= 0 && jt >= 0 && kt >= 0
&& it != jt && jt != kt && it != kt
&& at != bt && bt != ct && at != ct
&& at < nvir_cas && bt < nvir_cas && ct < nvir_cas ){
exclude = paaa[Tc(it, jt, kt, at, bt, ct, nocc3)];
//printf("idx: %d for %d %d %d %d %d %d\n",Tc(it, jt, kt, at, bt, ct, nocc3),it, jt, kt, at, bt, ct);
//if (exclude == 1.0) printf("no exclude: %d %d %d %d %d %d\n",it+nocc_iact,jt+nocc_iact,kt+nocc_iact,at+nocc,bt+nocc,ct+nocc);
}
else
exclude = 1.0;
w[idx[n]] += cache[n] * exclude;
v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]) * exclude;
} } }
}
// off diag version
/* Like get_wv_ec, but additionally accumulates a third tensor y with the
 * disconnected t1*t1*t1 and t1*t2 products for this permutation, scaled by
 * the same exclusion weight. */
static void get_wv_ecr(double *w, double *v, double *y, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1T, double *t2T,
double *paaa, int nocc, int nocc3, int nocc_iact,
int nvir, int nvir_cas, int a, int b, int c, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 =-1;
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2Tab, *pt2Tbc, *pt2Tac;
int it, jt, kt, at, bt, ct;
double exclude = 1.0;
/* sort (a,b,c) ascending for the compact Tc lookup */
int arr[3] = {a, b, c};
qsort(arr, 3, sizeof(int), comparator);
at = arr[0];
bt = arr[1];
ct = arr[2];
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
dgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc,
&DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo,
&D1, cache, &nocc);
/* t2T slabs for the (a,b), (b,c) and (a,c) virtual pairs */
pt2Tab = t2T + a * nvoo + b * noo;
pt2Tbc = t2T + b * nvoo + c * noo;
pt2Tac = t2T + a * nvoo + c * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
arr[0]=i;
arr[1]=j;
arr[2]=k;
qsort(arr, 3, sizeof(int), comparator);
it = arr[0]-nocc_iact;
jt = arr[1]-nocc_iact;
kt = arr[2]-nocc_iact;
if ( it >= 0 && jt >= 0 && kt >= 0
&& it != jt && jt != kt && it != kt
&& at != bt && bt != ct && at != ct
&& at < nvir_cas && bt < nvir_cas && ct < nvir_cas ){
exclude = paaa[Tc(it, jt, kt, at, bt, ct, nocc3)];
//printf("idx: %d for %d %d %d %d %d %d\n",Tc(it, jt, kt, at, bt, ct, nocc3),it, jt, kt, at, bt, ct);
//if (exclude == 1.0) printf("no exclude: %d %d %d %d %d %d\n",it+nocc_iact,jt+nocc_iact,kt+nocc_iact,at+nocc,bt+nocc,ct+nocc);
}
else
exclude = 1.0;
w[idx[n]] += cache[n] * exclude;
v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
+ pt2Tab[i*nocc+j]* fvohalf[c*nocc+k]) * exclude;
y[idx[n]] += (t1T[a*nocc+i] * t1T[b*nocc+j] * t1T[c*nocc+k]
+t1T[a*nocc+i] * pt2Tbc[j*nocc+k]
+t1T[b*nocc+j] * pt2Tac[i*nocc+k]
+t1T[c*nocc+k] * pt2Tab[i*nocc+j])* exclude;
} } }
}
/* Point-group-symmetry-adapted version of get_wv: the dense dgemm over all
 * orbitals is replaced by one small dgemm per symmetry-allowed irrep block
 * (using the o_ir_loc / v_ir_loc / oo_ir_loc block offsets and per-orbital
 * irreps in orbsym), skipping blocks that vanish by symmetry. Statement
 * order and offset bookkeeping here are load-bearing; do not reorder. */
static void sym_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1T, double *t2T,
int nocc, int nvir, int a, int b, int c, int nirrep,
int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym,
int *idx)
{
const double D0 = 0;
const double D1 = 1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const int nooo = nocc * noo;
const int nvoo = nvir * noo;
int a_irrep = orbsym[nocc+a];
int b_irrep = orbsym[nocc+b];
int c_irrep = orbsym[nocc+c];
int ab_irrep = a_irrep ^ b_irrep;
int bc_irrep = c_irrep ^ b_irrep;
int i, j, k, n;
int fr, f0, f1, df, mr, m0, m1, dm, mk0;
int ir, i0, i1, di, kr, k0, k1, dk, jr;
int ijr, ij0, ij1, dij, jkr, jk0, jk1, djk;
double *pt2T;
/* symmetry adapted
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c]) */
pt2T = t2T + c * nvoo;
for (ir = 0; ir < nirrep; ir++) {
i0 = o_ir_loc[ir];
i1 = o_ir_loc[ir+1];
di = i1 - i0;
if (di > 0) {
fr = ir ^ ab_irrep;
f0 = v_ir_loc[fr];
f1 = v_ir_loc[fr+1];
df = f1 - f0;
if (df > 0) {
jkr = fr ^ c_irrep;
jk0 = oo_ir_loc[jkr];
jk1 = oo_ir_loc[jkr+1];
djk = jk1 - jk0;
if (djk > 0) {
dgemm_(&TRANS_N, &TRANS_N, &djk, &di, &df,
&D1, pt2T+f0*noo+jk0, &noo, vv_op+i0*nmo+nocc+f0, &nmo,
&D0, cache, &djk);
for (n = 0, i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (jr = 0; jr < nirrep; jr++) {
kr = jkr ^ jr;
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[idx[i*noo+j*nocc+k]] -= cache[n];
} }
} }
}
}
}
}
/* symmetry adapted
* w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b]) */
pt2T = t2T + c * nvoo + b * noo;
vooo += a * nooo;
mk0 = oo_ir_loc[bc_irrep];
for (mr = 0; mr < nirrep; mr++) {
m0 = o_ir_loc[mr];
m1 = o_ir_loc[mr+1];
dm = m1 - m0;
if (dm > 0) {
kr = mr ^ bc_irrep;
k0 = o_ir_loc[kr];
k1 = o_ir_loc[kr+1];
dk = k1 - k0;
if (dk > 0) {
ijr = mr ^ a_irrep;
ij0 = oo_ir_loc[ijr];
ij1 = oo_ir_loc[ijr+1];
dij = ij1 - ij0;
if (dij > 0) {
dgemm_(&TRANS_N, &TRANS_N, &dk, &dij, &dm,
&D1, pt2T+mk0, &dk, vooo+ij0*nocc+m0, &nocc,
&D0, cache, &dk);
for (n = 0, ir = 0; ir < nirrep; ir++) {
jr = ijr ^ ir;
for (i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[idx[i*noo+j*nocc+k]] -= cache[n];
} }
} }
}
mk0 += dm * dk;
}
}
}
/* the v accumulation has no symmetry structure; same as get_wv */
pt2T = t2T + a * nvoo + b * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
/* Full (T) contribution of one (a,b,c) triple: accumulates w0/v0 over all
 * six permutations of (a,b,c) (dense path for nirrep == 1, symmetry-adapted
 * path otherwise), antisymmetrizes via add_and_permute, and returns the
 * energy with the degeneracy prefactor (1/6 if a==c i.e. all three equal
 * since a>=b>=c ordering is assumed by the caller's job generation, 1/2 if
 * exactly two are equal, 1 otherwise). cache1 must hold at least 3*nocc^3
 * doubles (v0, w0, z0; z0 doubles as dgemm scratch). */
static double contract6_aaa(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double *t1T, double *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym, double *fvo,
double *vooo, double *cache1, void **cache,
int *permute_idx)
{
int nooo = nocc * nocc * nocc;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
double *v0 = cache1;
double *w0 = v0 + nooo;
double *z0 = w0 + nooo;
double *wtmp = z0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
if (nirrep == 1) {
get_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0);
get_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1);
get_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2);
get_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3);
get_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4);
get_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5);
} else {
sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx0);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx1);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx2);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx3);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx4);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx5);
}
add_and_permute(z0, w0, v0, nocc);
double et;
if (a == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
/*
 * Externally corrected CCSD(T) energy contribution of one virtual
 * triple (a, b, c), alpha-alpha-alpha spin case.  Same structure as
 * contract6_aaa, but the W/V intermediates are built by get_wv_ec,
 * which masks contributions against the external-correction tensor
 * paaa (CAS-space exclusion).
 *
 * cache1 must supply at least 3*nocc^3 doubles: v0, w0, z0.
 * Returns the energy increment for this triple.
 */
static double eccontract6_aaa(int nocc, int nocc3, int nocc_iact,
int nvir, int nvir_cas, int a, int b, int c,
double *mo_energy, double *t1T, double *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym, double *fvo,
double *vooo, double *cache1, void **cache,
int *permute_idx, double *paaa)
{
/* Six precomputed nocc^3 scatter tables, one per (a,b,c) ordering. */
int nooo = nocc * nocc * nocc;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
/* wtmp aliases z0: z0 is only written later by add_and_permute,
 * after all get_wv_ec/sym_wv calls have finished with the scratch. */
double *v0 = cache1;
double *w0 = v0 + nooo;
double *z0 = w0 + nooo;
double *wtmp = z0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
if (nirrep == 1) {
/* Accumulate W and V over all six permutations of (a, b, c). */
get_wv_ec(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, a, b, c, idx0);
get_wv_ec(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, a, c, b, idx1);
get_wv_ec(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, b, a, c, idx2);
get_wv_ec(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, b, c, a, idx3);
get_wv_ec(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, c, a, b, idx4);
get_wv_ec(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, c, b, a, idx5);
// get_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0);
// get_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1);
// get_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2);
// get_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3);
// get_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4);
// get_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5);
} else {
/* NOTE(review): the ec-masked path has no point-group-symmetry
 * implementation; after printing, this falls through to the
 * UNCORRECTED sym_wv code, silently ignoring paaa — confirm
 * whether nirrep > 1 should instead be rejected by the caller. */
printf("Not implemented yet");
sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx0);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx1);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx2);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx3);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx4);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx5);
}
add_and_permute(z0, w0, v0, nocc);
double et;
/* Degeneracy prefactor: with jobs generated a >= b >= c, a == c
 * implies all three equal (1/6); one equal pair gives 1/2. */
if (a == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
/*
 * Weighted permutation sum over the three indices of an n*n*n tensor:
 *   out[p,q,r] = 4*v[p,q,r] + v[q,r,p] + v[r,p,q]
 *              - 2*v[r,q,p] - 2*v[p,r,q] - 2*v[q,p,r]
 * (cyclic permutations enter with +1, anticyclic with -2, identity
 * with +4).  v is stored row-major.
 */
static void permute(double *out, double *v, int n)
{
    const int nn = n * n;
    int p, q, r;
    for (p = 0; p < n; p++) {
        for (q = 0; q < n; q++) {
            for (r = 0; r < n; r++) {
                const int pqr = p*nn + q*n + r;
                out[pqr] = v[pqr] * 4
                         + v[q*nn + r*n + p]
                         + v[r*nn + p*n + q]
                         - v[r*nn + q*n + p] * 2
                         - v[p*nn + r*n + q] * 2
                         - v[q*nn + p*n + r] * 2;
            }
        }
    }
}
/*
 * Accumulate fac * w[n] * v[n] / D_ijk over all occupied triples,
 * where D_ijk = e_i + e_j + e_k - (e_a + e_b + e_c).  Occupied orbital
 * energies are mo_energy[0..nocc), virtual ones mo_energy[nocc + x].
 * w and v are flat nocc^3 arrays in (i, j, k) row-major order.
 */
double _rccsd_t_get_denom(double *w, double *v, double *mo_energy, int nocc,
int a, int b, int c, double fac)
{
    const double abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
    double dn = 0;
    size_t n = 0;
    int i, j, k;
    for (i = 0; i < nocc; i++) {
        for (j = 0; j < nocc; j++) {
            for (k = 0; k < nocc; k++) {
                dn += fac * w[n] * v[n]
                    / (mo_energy[i] + mo_energy[j] + mo_energy[k] - abc);
                n++;
            }
        }
    }
    return dn;
}
/*
 * Externally corrected, "renormalized" aaa triples term for one
 * (a, b, c) triple: computes both the energy contribution (et) and a
 * denominator-style term (dn) built from the extra y0 intermediate
 * filled by get_wv_ecr.  cache1 must supply 4*nocc^3 doubles
 * (v0, w0, z0, y0).
 *
 * NOTE(review): `et` and `dn` are passed BY VALUE and the function
 * returns void, so the values assigned below are lost on return — the
 * caller (CCecrccsd_t_aaa) never sees them.  They almost certainly
 * need to be `double *` out-parameters; confirm against the Python
 * caller before changing the signature (call site must change too).
 */
static void ecrcontract6_aaa(double et, double dn, int nocc, int nocc3, int nocc_iact,
int nvir, int nvir_cas, int a, int b, int c,
double *mo_energy, double *t1T, double *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym, double *fvo,
double *vooo, double *cache1, void **cache,
int *permute_idx, double *paaa)
{
/* Six precomputed nocc^3 scatter tables, one per (a,b,c) ordering. */
int nooo = nocc * nocc * nocc;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
/* wtmp aliases y0; y0 is re-zeroed below before it is consumed. */
double *v0 = cache1;
double *w0 = v0 + nooo;
double *z0 = w0 + nooo;
double *y0 = z0 + nooo;
double *wtmp = y0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
y0[i] = 0;
}
if (nirrep == 1) {
/* Accumulate W, V and the renormalization intermediate Y over all
 * six permutations of (a, b, c), masked by paaa. */
get_wv_ecr(w0, v0, y0, wtmp, fvo, vooo, cache[0], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, a, b, c, idx0);
get_wv_ecr(w0, v0, y0, wtmp, fvo, vooo, cache[1], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, a, c, b, idx1);
get_wv_ecr(w0, v0, y0, wtmp, fvo, vooo, cache[2], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, b, a, c, idx2);
get_wv_ecr(w0, v0, y0, wtmp, fvo, vooo, cache[3], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, b, c, a, idx3);
get_wv_ecr(w0, v0, y0, wtmp, fvo, vooo, cache[4], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, c, a, b, idx4);
get_wv_ecr(w0, v0, y0, wtmp, fvo, vooo, cache[5], t1T, t2T, paaa, nocc, nocc3, nocc_iact, nvir, nvir_cas, c, b, a, idx5);
} else {
/* NOTE(review): no symmetry implementation for the ecr path; this
 * falls through to the uncorrected sym_wv code (y0 stays zero). */
printf("Not implemented yet");
sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx0);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx1);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx2);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx3);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx4);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx5);
}
add_and_permute(z0, w0, v0, nocc);
/* Degeneracy prefactor: 1/6 when all three virtuals equal, 1/2 for
 * one equal pair (jobs are generated with a >= b >= c). */
if (a == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
/* Reuse w0 to hold the permuted Y intermediate for the denominator. */
for (i = 0; i < nooo; i++) {
w0[i] = 0;
}
permute(w0, y0, nocc);
if (a == c) {
dn = _rccsd_t_get_denom(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
dn = _rccsd_t_get_denom(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
dn = _rccsd_t_get_denom(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
}
/*
 * Driver for the aaa-spin CCSD(T) triples correction: sums the energy
 * contribution of every (a, b, c) job in the [a0,a1) x [b0,b1) virtual
 * panel into *e_tot, accumulating per-thread partial sums under an
 * OpenMP critical section.
 *
 * Fix: `jobs` was allocated but never freed — leaked on every call.
 */
void CCuccsd_t_aaa(double complex *e_tot,
                   double *mo_energy, double *t1T, double *t2T,
                   double *vooo, double *fvo,
                   int nocc, int nvir, int a0, int a1, int b0, int b1,
                   int nirrep, int *o_ir_loc, int *v_ir_loc,
                   int *oo_ir_loc, int *orbsym,
                   double *cache_row_a, double *cache_col_a,
                   double *cache_row_b, double *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    /* c runs over [0, b], so b1 slots per (a, b) pair suffice. */
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b, sizeof(double));
    /* contract6_aaa expects fvo pre-scaled by 1/2. */
    double fvohalf[nvir*nocc];
    int i;
    for (i = 0; i < nvir*nocc; i++) {
        fvohalf[i] = fvo[i] * .5;
    }
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvohalf, jobs, e_tot, \
               permute_idx)
    {
        int a, b, c;
        size_t k;
        /* Per-thread scratch for contract6_aaa's v0/w0/z0 buffers. */
        double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
        double e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
            a = jobs[k].a;
            b = jobs[k].b;
            c = jobs[k].c;
            e += contract6_aaa(nocc, nvir, a, b, c, mo_energy, t1T, t2T,
                               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                               fvohalf, vooo, cache1, jobs[k].cache,
                               permute_idx);
        }
        free(cache1);
#pragma omp critical
        *e_tot += e;
    }
    free(jobs);        /* was leaked in the original */
    free(permute_idx);
}
/*
 * Driver for the externally corrected aaa triples term: like
 * CCuccsd_t_aaa but routes each job through eccontract6_aaa with the
 * external-correction tensor paaa.
 *
 * Fix: `jobs` was allocated but never freed — leaked on every call.
 */
void CCecccsd_t_aaa(double complex *e_tot,
                    double *mo_energy, double *t1T, double *t2T,
                    double *vooo, double *fvo, double *paaa,
                    int nocc, int nvir, const int nocc_iact, const int nvir_cas, const int nocc3,
                    int a0, int a1, int b0, int b1,
                    int nirrep, int *o_ir_loc, int *v_ir_loc,
                    int *oo_ir_loc, int *orbsym,
                    double *cache_row_a, double *cache_col_a,
                    double *cache_row_b, double *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b, sizeof(double));
    /* eccontract6_aaa expects fvo pre-scaled by 1/2. */
    double fvohalf[nvir*nocc];
    int i;
    for (i = 0; i < nvir*nocc; i++) {
        fvohalf[i] = fvo[i] * .5;
    }
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
    /* nocc_iact/nvir_cas/nocc3 are const-qualified, hence predetermined
     * shared under default(none). */
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvohalf, jobs, e_tot, \
               permute_idx, paaa)
    {
        int a, b, c;
        size_t k;
        double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
        double e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
            a = jobs[k].a;
            b = jobs[k].b;
            c = jobs[k].c;
            e += eccontract6_aaa(nocc, nocc3, nocc_iact, nvir, nvir_cas, a, b, c, mo_energy, t1T, t2T,
                                 nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                                 fvohalf, vooo, cache1, jobs[k].cache,
                                 permute_idx, paaa);
        }
        free(cache1);
#pragma omp critical
        *e_tot += e;
    }
    free(jobs);        /* was leaked in the original */
    free(permute_idx);
}
/*
 * Driver for the externally corrected, renormalized aaa triples term:
 * accumulates the energy into *e_tot and the denominator into *denom.
 *
 * Fixes:
 *  - `*denom += d;` was OUTSIDE the `#pragma omp critical` (the pragma
 *    binds only to the single following statement), i.e. a data race;
 *    both accumulations now share one critical block.
 *  - `etmp`/`dtmp` were read uninitialized (undefined behavior); they
 *    are now zero-initialized.
 *  - `jobs` was allocated but never freed.
 *
 * NOTE(review): ecrcontract6_aaa receives et/dn BY VALUE and returns
 * void, so etmp/dtmp are never actually updated and e/d stay zero.
 * Its signature needs `double *` out-parameters; not changed here so
 * this edit stands alone.
 */
void CCecrccsd_t_aaa(double complex *e_tot, double complex *denom,
                     double *mo_energy, double *t1T, double *t2T,
                     double *vooo, double *fvo, double *paaa,
                     int nocc, int nvir, const int nocc_iact, const int nvir_cas, const int nocc3,
                     int a0, int a1, int b0, int b1,
                     int nirrep, int *o_ir_loc, int *v_ir_loc,
                     int *oo_ir_loc, int *orbsym,
                     double *cache_row_a, double *cache_col_a,
                     double *cache_row_b, double *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b, sizeof(double));
    double fvohalf[nvir*nocc];
    int i;
    for (i = 0; i < nvir*nocc; i++) {
        fvohalf[i] = fvo[i] * .5;
    }
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvohalf, jobs, e_tot, \
               denom, permute_idx, paaa)
    {
        int a, b, c;
        size_t k;
        /* 4*nocc^3 scratch: v0, w0, z0 plus the extra y0 buffer. */
        double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*4+2));
        double e = 0;
        double d = 0;
        double etmp = 0, dtmp = 0;  /* zero-init: were read uninitialized */
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
            a = jobs[k].a;
            b = jobs[k].b;
            c = jobs[k].c;
            ecrcontract6_aaa(etmp, dtmp, nocc, nocc3, nocc_iact, nvir,
                             nvir_cas, a, b, c, mo_energy, t1T, t2T,
                             nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                             fvohalf, vooo, cache1, jobs[k].cache,
                             permute_idx, paaa);
            e += etmp;
            d += dtmp;
        }
        free(cache1);
#pragma omp critical
        {
            *e_tot += e;
            *denom += d;   /* was outside the critical section: data race */
        }
    }
    free(jobs);        /* was leaked in the original */
    free(permute_idx);
}
/*************************************************
*
* UCCSD(T) beta-alpha-alpha
*
*************************************************/
/*
 * Build the W and V intermediates (noccb x nocca x nocca, index order
 * (I, j, k) with I a beta occupied) for one (a, b, c) triple of the
 * UCCSD(T) beta-alpha-alpha term.  `a` is the beta virtual; b, c are
 * alpha virtuals.  vs_ts is the pointer table packed by the driver
 * (CCuccsd_t_baa); `cache` holds the three integral blocks attached to
 * this job by gen_baa_jobs.  The numpy transcription of the
 * contractions is kept in the comment below.
 */
static void get_wv_baa(double *w, double *v, double **vs_ts, double **cache,
int nocca, int noccb, int nvira, int nvirb,
int a, int b, int c)
{
/* Unpack the shared tensor table (slots fixed by the driver). */
double *fvo = vs_ts[2];
double *fVO = vs_ts[3];
double *vooo = vs_ts[4];
double *vOoO = vs_ts[5];
double *VoOo = vs_ts[6];
double *t1aT = vs_ts[7];
double *t1bT = vs_ts[8];
double *t2aaT = vs_ts[9];
double *t2abT = vs_ts[10];
double *vvop = cache[0];
double *vVoP = cache[1];
double *VvOp = cache[2];
const double D0 = 0;
const double D1 = 1;
const double D2 = 2;
const char TRANS_T = 'T';
const char TRANS_N = 'N';
/* Dimension products used as BLAS sizes and pointer offsets. */
const int nmoa = nocca + nvira;
const int nmob = noccb + nvirb;
const int noo = nocca * nocca;
const int nOo = noccb * nocca;
const size_t nooo = nocca * noo;
const size_t noOo = nocca * nOo;
const size_t nOoO = noccb * nOo;
const size_t nvoo = nvira * noo;
const int nVoO = nvirb * nOo;
int i, j, k, n;
/*
 * t2aaT = t2aa.transpose(2,3,0,1)
 * w = numpy.einsum('ejI,ke->Ijk', t2abT[:,a], vvov) * 2
 * w += numpy.einsum('EjI,kE->Ijk', t2abT[b,:], vVoV) * 2
 * w += numpy.einsum('mj,mIk->Ijk', t2aaT[b,c], VoOo[a,:])
 * w += numpy.einsum('kM,MjI->Ijk', t2abT[b,a], vOoO[c,:]) * 2
 * w += numpy.einsum('ejk,Ie->Ijk', t2aaT[b,:], VvOv)
 * w += numpy.einsum('mI,mjk->Ijk', t2abT[b,a], vooo[c,:]) * 2
 * v = numpy.einsum('kj,I->Ijk', vvoo, t1bT[a])
 * v += numpy.einsum('Ik,j->Ijk', VvOo, t1aT[b]) * 2
 * v += numpy.einsum('jk,I->Ijk', t2aaT[b,c], fVO[a]) * .5
 * v += numpy.einsum('kI,j->Ijk', t2abT[c,a], fvo[b]) * 2
 * v += w
 */
/* First four contractions accumulate into v in (j, I, k) layout. */
dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvira,
&D2, vvop+nocca, &nmoa, t2abT+a*nOo, &nVoO,
&D0, v, &nocca);
dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvirb,
&D2, vVoP+noccb, &nmob, t2abT+b*(size_t)nVoO, &nOo,
&D1, v, &nocca);
dgemm_(&TRANS_N, &TRANS_T, &nOo, &nocca, &nocca,
&D1, VoOo+a*noOo, &nOo, t2aaT+b*nvoo+c*noo, &nocca,
&D1, v, &nOo);
dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &noccb,
&D2, t2abT+b*(size_t)nVoO+a*nOo, &noccb, vOoO+c*nOoO, &nOo,
&D1, v, &nocca);
/* Transpose v's (j, I, k) layout into w's canonical (I, j, k). */
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
w[n] = v[j*nOo+i*nocca+k];
} } }
/* Remaining two contractions accumulate directly in w's layout. */
dgemm_(&TRANS_N, &TRANS_N, &noo, &noccb, &nvira,
&D1, t2aaT+b*nvoo, &noo, VvOp+nocca, &nmoa,
&D1, w, &noo);
dgemm_(&TRANS_N, &TRANS_T, &noo, &noccb, &nocca,
&D2, vooo+c*nooo, &noo, t2abT+b*(size_t)nVoO+a*nOo, &noccb,
&D1, w, &noo);
/* Pre-scaled singles/Fock vectors for the final elementwise pass. */
double t1aT2[nocca];
double fvo2[nocca];
double fVOhalf[noccb];
for (i = 0; i < nocca; i++) {
t1aT2[i] = t1aT[b*nocca+i] * 2;
fvo2[i] = fvo[b*nocca+i] * 2;
}
for (i = 0; i < noccb; i++) {
fVOhalf[i] = fVO[a*noccb+i] * .5;
}
double *pt2aaT = t2aaT + b * nvoo + c * noo;
double *pt2abT = t2abT + (c*nvirb+a) * nOo;
/* V = W + disconnected singles and Fock terms (overwrites v). */
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
v[n] = (w[n] + vvop[k*nmoa+j] * t1bT[a*noccb+i]
+ VvOp[i*nmoa+k] * t1aT2[j]
+ pt2aaT[j*nocca+k] * fVOhalf[i]
+ pt2abT[k*noccb+i] * fvo2[j]);
} } }
}
//static void get_wv_baa_ec(double *w, double *v, double **vs_ts, double **cache,
// int nocca, int noccb, int nvira, int nvirb,
// int nocc_cas, int nvir_cas, int nocc_iact, int nocc2,
// int a, int b, int c)
//{
// double *fvo = vs_ts[2];
// double *fVO = vs_ts[3];
// double *vooo = vs_ts[4];
// double *vOoO = vs_ts[5];
// double *VoOo = vs_ts[6];
// double *t1aT = vs_ts[7];
// double *t1bT = vs_ts[8];
// double *t2aaT = vs_ts[9];
// double *t2abT = vs_ts[10];
// double *pbaa = vs_ts[11];
// double *vvop = cache[0];
// double *vVoP = cache[1];
// double *VvOp = cache[2];
// const double D0 = 0;
// const double D1 = 1;
// const double D2 = 2;
// const char TRANS_T = 'T';
// const char TRANS_N = 'N';
// const int nmoa = nocca + nvira;
// const int nmob = noccb + nvirb;
// const int noo = nocca * nocca;
// const int nOo = noccb * nocca;
// const size_t nooo = nocca * noo;
// const size_t noOo = nocca * nOo;
// const size_t nOoO = noccb * nOo;
// const size_t nvoo = nvira * noo;
// const int nVoO = nvirb * nOo;
// int i, j, k, n;
//
///*
// * t2aaT = t2aa.transpose(2,3,0,1)
// * w = numpy.einsum('ejI,ke->Ijk', t2abT[:,a], vvov) * 2
// * w += numpy.einsum('EjI,kE->Ijk', t2abT[b,:], vVoV) * 2
// * w += numpy.einsum('mj,mIk->Ijk', t2aaT[b,c], VoOo[a,:])
// * w += numpy.einsum('kM,MjI->Ijk', t2abT[b,a], vOoO[c,:]) * 2
// * w += numpy.einsum('ejk,Ie->Ijk', t2aaT[b,:], VvOv)
// * w += numpy.einsum('mI,mjk->Ijk', t2abT[b,a], vooo[c,:]) * 2
// * v = numpy.einsum('kj,I->Ijk', vvoo, t1bT[a])
// * v += numpy.einsum('Ik,j->Ijk', VvOo, t1aT[b]) * 2
// * v += numpy.einsum('jk,I->Ijk', t2aaT[b,c], fVO[a]) * .5
// * v += numpy.einsum('kI,j->Ijk', t2abT[c,a], fvo[b]) * 2
// * v += w
// */
// dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvira,
// &D2, vvop+nocca, &nmoa, t2abT+a*nOo, &nVoO,
// &D0, v, &nocca);
// dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvirb,
// &D2, vVoP+noccb, &nmob, t2abT+b*(size_t)nVoO, &nOo,
// &D1, v, &nocca);
// dgemm_(&TRANS_N, &TRANS_T, &nOo, &nocca, &nocca,
// &D1, VoOo+a*noOo, &nOo, t2aaT+b*nvoo+c*noo, &nocca,
// &D1, v, &nOo);
// dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &noccb,
// &D2, t2abT+b*(size_t)nVoO+a*nOo, &noccb, vOoO+c*nOoO, &nOo,
// &D1, v, &nocca);
// for (n = 0, i = 0; i < noccb; i++) {
// for (j = 0; j < nocca; j++) {
// for (k = 0; k < nocca; k++, n++) {
// w[n] = v[j*nOo+i*nocca+k] * pbaa[T(i-nocc_iact, j-nocc_iact, k-nocc_iact, a, b, c, nocca, nvira)];
// } } }
// dgemm_(&TRANS_N, &TRANS_N, &noo, &noccb, &nvira,
// &D1, t2aaT+b*nvoo, &noo, VvOp+nocca, &nmoa,
// &D1, w, &noo);
// dgemm_(&TRANS_N, &TRANS_T, &noo, &noccb, &nocca,
// &D2, vooo+c*nooo, &noo, t2abT+b*(size_t)nVoO+a*nOo, &noccb,
// &D1, w, &noo);
//
// double t1aT2[nocca];
// double fvo2[nocca];
// double fVOhalf[noccb];
// for (i = 0; i < nocca; i++) {
// t1aT2[i] = t1aT[b*nocca+i] * 2;
// fvo2[i] = fvo[b*nocca+i] * 2;
// }
// for (i = 0; i < noccb; i++) {
// fVOhalf[i] = fVO[a*noccb+i] * .5;
// }
// double *pt2aaT = t2aaT + b * nvoo + c * noo;
// double *pt2abT = t2abT + (c*nvirb+a) * nOo;
// for (n = 0, i = 0; i < noccb; i++) {
// for (j = 0; j < nocca; j++) {
// for (k = 0; k < nocca; k++, n++) {
// v[n] = (w[n] + vvop[k*nmoa+j] * t1bT[a*noccb+i]
// + VvOp[i*nmoa+k] * t1aT2[j]
// + pt2aaT[j*nocca+k] * fVOhalf[i]
// + pt2abT[k*noccb+i] * fvo2[j]) * pbaa[T(i-nocc_iact, j-nocc_iact, k-nocc_iact, a, b, c, nocca, nvira)];
// } } }
//}
// off diag version
/*
 * Externally corrected ("off diag") variant of get_wv_baa: identical
 * contractions, but each (I, j, k) element of W and of the final V is
 * multiplied by `exclude`, a 0/1 mask read from pbaa when the shifted,
 * sorted indices fall inside the CAS window (occupied indices >=
 * nocc_iact, virtuals < nvir_cas, alpha pair distinct); otherwise the
 * element is kept unchanged (mask 1.0).
 *
 * NOTE(review): qsort on 2-element arrays inside the innermost loops
 * is correct but costly; a simple min/max swap would avoid the libc
 * call overhead per element.  The assert() requires pbaa entries to be
 * exactly 0.0 or 1.0 (compiled out under NDEBUG).
 */
static void get_wv_baa_ec(double *w, double *v, double **vs_ts, double **cache,
int nocca, int noccb, int nvira, int nvirb,
int nocc_cas, int nvir_cas, int nocc_iact, int nocc2,
int a, int b, int c)
{
/* Unpack the shared tensor table (slots fixed by the driver). */
double *fvo = vs_ts[2];
double *fVO = vs_ts[3];
double *vooo = vs_ts[4];
double *vOoO = vs_ts[5];
double *VoOo = vs_ts[6];
double *t1aT = vs_ts[7];
double *t1bT = vs_ts[8];
double *t2aaT = vs_ts[9];
double *t2abT = vs_ts[10];
double *pbaa = vs_ts[11];
double *vvop = cache[0];
double *vVoP = cache[1];
double *VvOp = cache[2];
const double D0 = 0;
const double D1 = 1;
const double D2 = 2;
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const int nmoa = nocca + nvira;
const int nmob = noccb + nvirb;
const int noo = nocca * nocca;
const int nOo = noccb * nocca;
const size_t nooo = nocca * noo;
const size_t noOo = nocca * nOo;
const size_t nOoO = noccb * nOo;
const size_t nvoo = nvira * noo;
const int nVoO = nvirb * nOo;
int i, j, k, n;
int it, jt, kt, at, bt, ct;
double exclude = 1.0;
/* Sort the alpha virtual pair (b, c); `a` is the beta virtual. */
int arr[2] = {b, c};
qsort(arr, 2, sizeof(int), comparator);
at = a;
bt = arr[0];
ct = arr[1];
/*
 * t2aaT = t2aa.transpose(2,3,0,1)
 * w = numpy.einsum('ejI,ke->Ijk', t2abT[:,a], vvov) * 2
 * w += numpy.einsum('EjI,kE->Ijk', t2abT[b,:], vVoV) * 2
 * w += numpy.einsum('mj,mIk->Ijk', t2aaT[b,c], VoOo[a,:])
 * w += numpy.einsum('kM,MjI->Ijk', t2abT[b,a], vOoO[c,:]) * 2
 * w += numpy.einsum('ejk,Ie->Ijk', t2aaT[b,:], VvOv)
 * w += numpy.einsum('mI,mjk->Ijk', t2abT[b,a], vooo[c,:]) * 2
 * v = numpy.einsum('kj,I->Ijk', vvoo, t1bT[a])
 * v += numpy.einsum('Ik,j->Ijk', VvOo, t1aT[b]) * 2
 * v += numpy.einsum('jk,I->Ijk', t2aaT[b,c], fVO[a]) * .5
 * v += numpy.einsum('kI,j->Ijk', t2abT[c,a], fvo[b]) * 2
 * v += w
 */
/* First four contractions accumulate into v in (j, I, k) layout. */
dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvira,
&D2, vvop+nocca, &nmoa, t2abT+a*nOo, &nVoO,
&D0, v, &nocca);
dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvirb,
&D2, vVoP+noccb, &nmob, t2abT+b*(size_t)nVoO, &nOo,
&D1, v, &nocca);
dgemm_(&TRANS_N, &TRANS_T, &nOo, &nocca, &nocca,
&D1, VoOo+a*noOo, &nOo, t2aaT+b*nvoo+c*noo, &nocca,
&D1, v, &nOo);
dgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &noccb,
&D2, t2abT+b*(size_t)nVoO+a*nOo, &noccb, vOoO+c*nOoO, &nOo,
&D1, v, &nocca);
/* Transpose into w's (I, j, k) layout, applying the CAS mask. */
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
arr[0]=j;
arr[1]=k;
qsort(arr, 2, sizeof(int), comparator);
/* Shift occupied indices into the CAS window. */
it = i-nocc_iact;
jt = arr[0]-nocc_iact;
kt = arr[1]-nocc_iact;
if ( it >=0 && jt >= 0 && kt >= 0
&& jt != kt && bt != ct
&& at < nvir_cas && bt < nvir_cas && ct < nvir_cas )
exclude = pbaa[SDc(it, jt, kt, at, bt, ct, nocc_cas, nvir_cas, nocc2)];
else
exclude = 1.0;
//printf("%d %d %d %d %d %d, exclude: %f\n",i,j,k,a,b,c,exclude);
assert(exclude == 1.0 || exclude == 0.0);
w[n] = v[j*nOo+i*nocca+k] * exclude;
} } }
/* Remaining two contractions accumulate directly in w's layout. */
dgemm_(&TRANS_N, &TRANS_N, &noo, &noccb, &nvira,
&D1, t2aaT+b*nvoo, &noo, VvOp+nocca, &nmoa,
&D1, w, &noo);
dgemm_(&TRANS_N, &TRANS_T, &noo, &noccb, &nocca,
&D2, vooo+c*nooo, &noo, t2abT+b*(size_t)nVoO+a*nOo, &noccb,
&D1, w, &noo);
/* Pre-scaled singles/Fock vectors for the final elementwise pass. */
double t1aT2[nocca];
double fvo2[nocca];
double fVOhalf[noccb];
for (i = 0; i < nocca; i++) {
t1aT2[i] = t1aT[b*nocca+i] * 2;
fvo2[i] = fvo[b*nocca+i] * 2;
}
for (i = 0; i < noccb; i++) {
fVOhalf[i] = fVO[a*noccb+i] * .5;
}
double *pt2aaT = t2aaT + b * nvoo + c * noo;
double *pt2abT = t2abT + (c*nvirb+a) * nOo;
/* V = W + disconnected terms, masked per-element like W above. */
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
arr[0]=j;
arr[1]=k;
qsort(arr, 2, sizeof(int), comparator);
it = i-nocc_iact;
jt = arr[0]-nocc_iact;
kt = arr[1]-nocc_iact;
if ( it >=0 && jt >= 0 && kt >= 0
&& jt != kt && bt != ct
&& at < nvir_cas && bt < nvir_cas && ct < nvir_cas )
exclude = pbaa[SDc(it, jt, kt, at, bt, ct, nocc_cas, nvir_cas, nocc2)];
else
exclude = 1.0;
assert(exclude == 1.0 || exclude == 0.0);
v[n] = (w[n] + vvop[k*nmoa+j] * t1bT[a*noccb+i]
+ VvOp[i*nmoa+k] * t1aT2[j]
+ pt2aaT[j*nocca+k] * fVOhalf[i]
+ pt2abT[k*noccb+i] * fvo2[j]) * exclude;
} } }
}
/*
* w - w.transpose(0,2,1)
*/
/*
 * Antisymmetrize the two alpha occupied indices of a
 * (noccb, nocca, nocca) tensor:  out[I,j,k] = w[I,j,k] - w[I,k,j].
 */
static void permute_baa(double *out, double *w, int nocca, int noccb)
{
    const int noo = nocca * nocca;
    int I, j, k;
    double *po = out;
    for (I = 0; I < noccb; I++) {
        const double *pw = w + I * noo;
        for (j = 0; j < nocca; j++) {
            for (k = 0; k < nocca; k++) {
                *po++ = pw[j*nocca + k] - pw[k*nocca + j];
            }
        }
    }
}
/*
 * Externally corrected baa triples energy with CAS-space masking.
 * Each (i, j, k) contribution is multiplied by `exclude`, a 0/1 mask
 * read from pbaa when the shifted, sorted occupied indices are all
 * distinct; all other contributions enter with full weight.
 *
 * Fix: `exclude` was only assigned inside the `it != jt && jt != kt`
 * branch, so a stale 0.0 from a previous iteration leaked into
 * unrelated (i, j, k) terms.  It is now reset to 1.0 on the other
 * branch, matching the pattern used in get_wv_baa_ec.
 *
 * NOTE(review): it/jt/kt may be negative when an index is below
 * nocc_iact; whether DSc tolerates that or a bounds guard (as in
 * get_wv_baa_ec) is needed should be confirmed before use.
 */
static double _get_energy_baa_ec(double *z0, double *z1, double *w0, double *w1,
                                 double *mo_ea, double *mo_eb, double *pbaa,
                                 int nocca, int noccb, int nocc_cas, int nvir_cas, int nocc_iact, int nocc2,
                                 int a, int b, int c, double fac)
{
    int noo = nocca * nocca;
    int i, j, k;
    int it, jt, kt, at, bt, ct;
    double abc = mo_eb[noccb+a] + mo_ea[nocca+b] + mo_ea[nocca+c];
    double et = 0;
    double exclude = 1.0;
    /* Sort the virtual triple once, up front. */
    int arr[3] = {a, b, c};
    qsort(arr, 3, sizeof(int), comparator);
    at = arr[0];
    bt = arr[1];
    ct = arr[2];
    for (i = 0; i < noccb; i++) {
    for (j = 0; j < nocca; j++) {
    for (k = 0; k < nocca; k++) {
        arr[0]=i;
        arr[1]=j;
        arr[2]=k;
        qsort(arr, 3, sizeof(int), comparator);
        /* Shift occupied indices into the CAS window. */
        it = arr[0]-nocc_iact;
        jt = arr[1]-nocc_iact;
        kt = arr[2]-nocc_iact;
        if ( it != jt && jt != kt ) {
            exclude = pbaa[DSc(it, jt, kt, at, bt, ct, nocc_cas, nvir_cas, nocc2)];
        } else {
            exclude = 1.0;  /* reset: do not reuse the previous mask */
        }
        et += (z0[i*noo+j*nocca+k] + z1[i*noo+k*nocca+j])
            * (w0[i*noo+j*nocca+k] + w1[i*noo+k*nocca+j])
            * fac / (mo_eb[i] + mo_ea[j] + mo_ea[k] - abc) * exclude;
    } } }
    return et;
}
/*
 * Perturbative energy for one baa triple: sums
 *   (z0[Ijk] + z1[Ikj]) * (w0[Ijk] + w1[Ikj]) * fac / D_Ijk
 * over all occupied (I, j, k), with the denominator
 *   D_Ijk = e_I(beta) + e_j(alpha) + e_k(alpha)
 *         - (e_a(beta) + e_b(alpha) + e_c(alpha)).
 */
static double _get_energy_baa(double *z0, double *z1, double *w0, double *w1,
double *mo_ea, double *mo_eb, int nocca, int noccb,
int a, int b, int c, double fac)
{
    const int noo = nocca * nocca;
    const double abc = mo_eb[noccb+a] + mo_ea[nocca+b] + mo_ea[nocca+c];
    double et = 0;
    int i, j, k;
    for (i = 0; i < noccb; i++) {
        for (j = 0; j < nocca; j++) {
            for (k = 0; k < nocca; k++) {
                const int njk = i*noo + j*nocca + k;
                const int nkj = i*noo + k*nocca + j;
                et += (z0[njk] + z1[nkj]) * (w0[njk] + w1[nkj])
                    * fac / (mo_eb[i] + mo_ea[j] + mo_ea[k] - abc);
            }
        }
    }
    return et;
}
/*
 * CCSD(T) energy contribution of one (a, b, c) triple for the
 * beta-alpha-alpha spin case (a: beta virtual; b, c: alpha virtuals).
 *
 * cache1 must hold at least 5 * noccb*nocca*nocca doubles, carved into
 * v0, v1, w0, w1, z0.  z1 aliases v0: this is safe because
 * permute_baa(z0, v0, ...) has consumed v0 before z1 is written.
 * vs_ts is the pointer table packed by the CCuccsd_t_baa driver.
 */
static double contract6_baa(int nocca, int noccb, int nvira, int nvirb,
int a, int b, int c,
double **vs_ts, void **cache, double *cache1)
{
int nOoo = noccb * nocca * nocca;
double *v0 = cache1;
double *v1 = v0 + nOoo;
double *w0 = v1 + nOoo;
double *w1 = w0 + nOoo;
double *z0 = w1 + nOoo;
/* z1 shares storage with v0 (see note above). */
double *z1 = v0;
/* W/V intermediates for the (b, c) and swapped (c, b) orderings;
 * cache slots 0-2 and 3-5 hold their respective integral blocks. */
get_wv_baa(w0, v0, vs_ts, ((double **)cache) , nocca, noccb, nvira, nvirb, a, b, c);
get_wv_baa(w1, v1, vs_ts, ((double **)cache)+3, nocca, noccb, nvira, nvirb, a, c, b);
permute_baa(z0, v0, nocca, noccb);
permute_baa(z1, v1, nocca, noccb);
double *mo_ea = vs_ts[0];
double *mo_eb = vs_ts[1];
double et;
/* Jobs are generated with b >= c, so b == c is the only degeneracy. */
if (b == c) {
et = _get_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, .5);
} else {
et = _get_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, 1.);
}
return et;
}
/*
 * Externally corrected baa triples energy for one (a, b, c) triple.
 * Identical to contract6_baa except the W/V intermediates come from
 * get_wv_baa_ec, which applies the CAS mask from vs_ts[11] (pbaa);
 * the energy itself is evaluated with the plain _get_energy_baa (the
 * _get_energy_baa_ec variant is intentionally left commented out).
 * cache1 layout and the z1/v0 aliasing are the same as contract6_baa.
 */
static double eccontract6_baa(int nocca, int noccb, int nvira, int nvirb,
int nocc_cas, int nvir_cas, int nocc_iact, int nocc2,
int a, int b, int c,
double **vs_ts, void **cache, double *cache1)
{
int nOoo = noccb * nocca * nocca;
double *v0 = cache1;
double *v1 = v0 + nOoo;
double *w0 = v1 + nOoo;
double *w1 = w0 + nOoo;
double *z0 = w1 + nOoo;
/* z1 shares storage with v0; safe, v0 is consumed before z1 is written. */
double *z1 = v0;
get_wv_baa_ec(w0, v0, vs_ts, ((double **)cache) , nocca, noccb, nvira, nvirb, nocc_cas, nvir_cas, nocc_iact, nocc2, a, b, c);
get_wv_baa_ec(w1, v1, vs_ts, ((double **)cache)+3, nocca, noccb, nvira, nvirb, nocc_cas, nvir_cas, nocc_iact, nocc2, a, c, b);
// get_wv_baa(w0, v0, vs_ts, ((double **)cache) , nocca, noccb, nvira, nvirb, a, b, c);
// get_wv_baa(w1, v1, vs_ts, ((double **)cache)+3, nocca, noccb, nvira, nvirb, a, c, b);
permute_baa(z0, v0, nocca, noccb);
permute_baa(z1, v1, nocca, noccb);
double *mo_ea = vs_ts[0];
double *mo_eb = vs_ts[1];
double et;
/* Jobs are generated with b >= c; b == c gets the 1/2 factor. */
if (b == c) {
// printf("b==c in tabb\n");
et = _get_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, .5);
} else {
et = _get_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, 1.);
//et = _get_energy_baa_ec(z0, z1, w0, w1, mo_ea, mo_eb, pbaa, nocca, noccb,
// nocc_cas, nvir_cas, nocc_iact, nocc2, a, b, c, 1.);
}
return et;
}
/*
 * Enumerate the (a, b, c) jobs for the baa triples term: a in [a0,a1),
 * b in [b0,b1), c in [0,b], attaching to each job the six cached
 * integral/amplitude blocks it needs (slots 0-2 for the (b, c)
 * ordering, slots 3-5 for the swapped (c, b) ordering).  Returns the
 * number of jobs filled in; the caller must size `jobs` for at least
 * (a1-a0)*(b1-b0)*b1 entries.
 *
 * NOTE: pointer arithmetic on the void* cache arguments relies on the
 * GCC extension sizeof(void) == 1; `stride` carries the element size
 * (sizeof(double) or sizeof(double complex)).
 */
static size_t gen_baa_jobs(CacheJob *jobs,
int nocca, int noccb, int nvira, int nvirb,
int a0, int a1, int b0, int b1,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b, size_t stride)
{
/* Byte sizes of one cached block along each axis. */
size_t nov = nocca * (nocca+nvira) * stride;
size_t noV = nocca * (noccb+nvirb) * stride;
size_t nOv = noccb * (nocca+nvira) * stride;
int da = a1 - a0;
int db = b1 - b0;
int a, b, c;
size_t m = 0;
for (a = a0; a < a1; a++) {
for (b = b0; b < b1; b++) {
for (c = 0; c <= b; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
/* c below the panel start lives in the column cache; otherwise
 * it is inside this panel's row cache. */
if (c < b0) {
jobs[m].cache[0] = cache_col_b + nov*(db*(c )+b-b0);
} else {
jobs[m].cache[0] = cache_row_b + nov*(b1*(c-b0)+b );
}
jobs[m].cache[1] = cache_col_a + noV*(da *(c )+a-a0);
jobs[m].cache[2] = cache_row_a + nOv*(nvira*(a-a0)+c );
jobs[m].cache[3] = cache_row_b + nov*(b1 *(b-b0)+c );
jobs[m].cache[4] = cache_col_a + noV*(da *(b )+a-a0);
jobs[m].cache[5] = cache_row_a + nOv*(nvira*(a-a0)+b );
} } }
return m;
}
/*
 * Driver for the UCCSD(T) beta-alpha-alpha triples term: sums the
 * contribution of every (a, b, c) job (a: beta virtual; b >= c: alpha
 * virtuals) into *e_tot across OpenMP threads.
 *
 * Fix: `jobs` was allocated but never freed — leaked on every call.
 */
void CCuccsd_t_baa(double complex *e_tot,
                   double *mo_ea, double *mo_eb,
                   double *t1aT, double *t1bT, double *t2aaT, double *t2abT,
                   double *vooo, double *vOoO, double *VoOo,
                   double *fvo, double *fVO,
                   int nocca, int noccb, int nvira, int nvirb,
                   int a0, int a1, int b0, int b1,
                   void *cache_row_a, void *cache_col_a,
                   void *cache_row_b, void *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = gen_baa_jobs(jobs, nocca, noccb, nvira, nvirb,
                                a0, a1, b0, b1,
                                cache_row_a, cache_col_a,
                                cache_row_b, cache_col_b, sizeof(double));
    /* Pack everything contract6_baa needs into one pointer table. */
    double *vs_ts[] = {mo_ea, mo_eb, fvo, fVO, vooo, vOoO, VoOo,
                       t1aT, t1bT, t2aaT, t2abT};
#pragma omp parallel default(none) \
        shared(njobs, nocca, noccb, nvira, nvirb, vs_ts, jobs, e_tot)
    {
        int a, b, c;
        size_t k;
        /* Per-thread scratch: v0, v1, w0, w1, z0 (5 * nOoo doubles). */
        double *cache1 = malloc(sizeof(double) * (noccb*nocca*nocca*5+1));
        double e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
            a = jobs[k].a;
            b = jobs[k].b;
            c = jobs[k].c;
            e += contract6_baa(nocca, noccb, nvira, nvirb, a, b, c, vs_ts,
                               jobs[k].cache, cache1);
        }
        free(cache1);
#pragma omp critical
        *e_tot += e;
    }
    free(jobs);        /* was leaked in the original */
}
/*
 * Driver for the externally corrected baa triples term: like
 * CCuccsd_t_baa but routes each job through eccontract6_baa, with the
 * CAS mask tensor pbaa appended to the shared pointer table.
 *
 * Fix: `jobs` was allocated but never freed — leaked on every call.
 */
void CCecccsd_t_baa(double complex *e_tot,
                    double *mo_ea, double *mo_eb,
                    double *t1aT, double *t1bT, double *t2aaT, double *t2abT,
                    double *vooo, double *vOoO, double *VoOo,
                    double *fvo, double *fVO, double *pbaa,
                    int nocca, int noccb, int nvira, int nvirb,
                    const int nocc_cas, const int nvir_cas, const int nocc_iact, const int nocc2,
                    int a0, int a1, int b0, int b1,
                    void *cache_row_a, void *cache_col_a,
                    void *cache_row_b, void *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = gen_baa_jobs(jobs, nocca, noccb, nvira, nvirb,
                                a0, a1, b0, b1,
                                cache_row_a, cache_col_a,
                                cache_row_b, cache_col_b, sizeof(double));
    /* Pointer table; slot 11 carries the CAS mask for get_wv_baa_ec. */
    double *vs_ts[] = {mo_ea, mo_eb, fvo, fVO, vooo, vOoO, VoOo,
                       t1aT, t1bT, t2aaT, t2abT, pbaa};
    /* nocc_cas/nvir_cas/nocc_iact/nocc2 are const-qualified, hence
     * predetermined shared under default(none). */
#pragma omp parallel default(none) \
        shared(njobs, nocca, noccb, nvira, nvirb, vs_ts, jobs, e_tot)
    {
        int a, b, c;
        size_t k;
        /* Per-thread scratch: v0, v1, w0, w1, z0 (5 * nOoo doubles). */
        double *cache1 = malloc(sizeof(double) * (noccb*nocca*nocca*5+1));
        double e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
            a = jobs[k].a;
            b = jobs[k].b;
            c = jobs[k].c;
            e += eccontract6_baa(nocca, noccb, nvira, nvirb,
                                 nocc_cas, nvir_cas, nocc_iact, nocc2,
                                 a, b, c, vs_ts,
                                 jobs[k].cache, cache1);
        }
        free(cache1);
#pragma omp critical
        *e_tot += e;
    }
    free(jobs);        /* was leaked in the original */
}
/*
* Complex version of all functions
*/
/*
 * Fold w into v in place (v += w), then antisymmetrize over the three
 * indices of the n*n*n complex tensor:
 *   out[p,q,r] = v[p,q,r] + v[q,r,p] + v[r,p,q]
 *              - v[r,q,p] - v[p,r,q] - v[q,p,r]
 * (cyclic permutations minus anticyclic ones).
 */
static void zadd_and_permute(double complex *out, double complex *w,
double complex *v, int n)
{
    const int nn = n * n;
    const int nnn = nn * n;
    int m, p, q, r;
    for (m = 0; m < nnn; m++) {
        v[m] += w[m];
    }
    for (p = 0; p < n; p++) {
        for (q = 0; q < n; q++) {
            for (r = 0; r < n; r++) {
                out[p*nn+q*n+r] = v[p*nn+q*n+r] + v[q*nn+r*n+p] + v[r*nn+p*n+q]
                                - v[r*nn+q*n+p] - v[p*nn+r*n+q] - v[q*nn+p*n+r];
            }
        }
    }
}
/*
 * Complex analogue of get_wv: accumulate the W and V integrands for
 * one permutation (a, b, c) into w and v, scattering through the
 * precomputed index table idx so that all six orderings land in the
 * canonical (i, j, k) layout.
 *
 * Parameter naming is shifted relative to the call site: zcontract6_aaa
 * passes (w0, v0, wtmp, fvohalf, vooo, cache[i], ...), so `cache` here
 * receives the nocc^3 scratch buffer and `vv_op` the cached integral
 * block for this permutation.
 */
static void zget_wv(double complex *w, double complex *v, double complex *cache,
double complex *fvohalf, double complex *vooo,
double complex *vv_op, double complex *t1T, double complex *t2T,
int nocc, int nvir, int a, int b, int c, int *idx)
{
const double complex D0 = 0;
const double complex D1 = 1;
const double complex DN1 =-1;
const char TRANS_N = 'N';
const char TRANS_T = 'T';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double complex *pt2T;
/* cache = -t2T[c] . vv_op(virtual rows), then accumulate the second
 * contraction -t2T[b,c] . vooo[a] on top. */
zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&DN1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
zgemm_(&TRANS_N, &TRANS_T, &nocc, &noo, &nocc,
&DN1, t2T+b*nvoo+c*noo, &nocc, vooo+a*nooo, &noo,
&D1, cache, &nocc);
pt2T = t2T + a * nvoo + b * noo;
/* Scatter-add: w collects the contracted term, v the singles/Fock
 * term, both permuted into canonical order via idx. */
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1T[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
/*
 * Complex version of contract6_aaa: CCSD(T) energy contribution of
 * one (a, b, c) virtual triple, aaa spin case.  The symmetry
 * arguments (nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym) are
 * accepted for signature parity with the real version but unused —
 * there is no point-group branch in this complex path.
 * cache1 supplies 3*nocc^3 complex scratch values: v0, w0, z0.
 */
static double complex
zcontract6_aaa(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double complex *t1T, double complex *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym, double complex *fvo,
double complex *vooo, double complex *cache1, void **cache,
int *permute_idx)
{
/* Six precomputed nocc^3 scatter tables, one per (a,b,c) ordering. */
int nooo = nocc * nocc * nocc;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
/* wtmp aliases z0; z0 is only written later by zadd_and_permute. */
double complex *v0 = cache1;
double complex *w0 = v0 + nooo;
double complex *z0 = w0 + nooo;
double complex *wtmp = z0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
/* Accumulate W and V over all six permutations of (a, b, c). */
zget_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5);
zadd_and_permute(z0, w0, v0, nocc);
double complex et;
/* Degeneracy prefactor: 1/6 for a == b == c (jobs have a >= b >= c),
 * 1/2 for one equal pair, 1 otherwise. */
if (a == c) {
et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
/*
 * Complex-amplitude driver for the aaa triples correction: sums each
 * (a, b, c) job's contribution into *e_tot via zcontract6_aaa.
 *
 * Fix: `jobs` was allocated but never freed — leaked on every call.
 */
void CCuccsd_t_zaaa(double complex *e_tot,
                    double *mo_energy, double complex *t1T, double complex *t2T,
                    double complex *vooo, double complex *fvo,
                    int nocc, int nvir, int a0, int a1, int b0, int b1,
                    int nirrep, int *o_ir_loc, int *v_ir_loc,
                    int *oo_ir_loc, int *orbsym,
                    void *cache_row_a, void *cache_col_a,
                    void *cache_row_b, void *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b,
                                    sizeof(double complex));
    /* zcontract6_aaa expects fvo pre-scaled by 1/2. */
    double complex fvohalf[nvir*nocc];
    int i;
    for (i = 0; i < nvir*nocc; i++) {
        fvohalf[i] = fvo[i] * .5;
    }
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvohalf, jobs, e_tot, \
               permute_idx)
    {
        int a, b, c;
        size_t k;
        /* Per-thread complex scratch for v0/w0/z0. */
        double complex *cache1 = malloc(sizeof(double complex) *
                                        (nocc*nocc*nocc*3+2));
        double complex e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
            a = jobs[k].a;
            b = jobs[k].b;
            c = jobs[k].c;
            e += zcontract6_aaa(nocc, nvir, a, b, c, mo_energy, t1T, t2T,
                                nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                                fvohalf, vooo, cache1, jobs[k].cache,
                                permute_idx);
        }
        free(cache1);
#pragma omp critical
        *e_tot += e;
    }
    free(jobs);        /* was leaked in the original */
    free(permute_idx);
}
/*************************************************
*
* UCCSD(T) beta-alpha-alpha
*
*************************************************/
/*
 * Build the W and V intermediates (each noccb*nocca*nocca long) for one
 * beta-alpha-alpha virtual triple: `a` indexes a beta virtual, `b` and `c`
 * alpha virtuals. Four zgemm contractions accumulate V, which is then copied
 * (axes reordered) into W, extended by two more contractions, and finally
 * combined with T1/Fock correction terms back into V.
 * NOTE(review): the physical meaning of each contraction is inferred from
 * strides (nOo = noccb*nocca etc.) -- confirm against caller CCuccsd_t_zbaa.
 */
static void zget_wv_baa(double complex *w, double complex *v,
double complex **vs_ts, double complex **cache,
int nocca, int noccb, int nvira, int nvirb,
int a, int b, int c)
{
/* vs_ts packs shared arrays: [2]=fvo [3]=fVO [4]=vooo [5]=vOoO [6]=VoOo
 * [7]=t1aT [8]=t1bT [9]=t2aaT [10]=t2abT (assembled in CCuccsd_t_zbaa). */
double complex *fvo = vs_ts[2];
double complex *fVO = vs_ts[3];
double complex *vooo = vs_ts[4];
double complex *vOoO = vs_ts[5];
double complex *VoOo = vs_ts[6];
double complex *t1aT = vs_ts[7];
double complex *t1bT = vs_ts[8];
double complex *t2aaT = vs_ts[9];
double complex *t2abT = vs_ts[10];
/* Per-job integral blocks supplied by the job generator. */
double complex *vvop = cache[0];
double complex *vVoP = cache[1];
double complex *VvOp = cache[2];
const double complex D0 = 0;
const double complex D1 = 1;
const double complex D2 = 2;
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const int nmoa = nocca + nvira;
const int nmob = noccb + nvirb;
const int noo = nocca * nocca;
const int nOo = noccb * nocca;
/* size_t strides guard against int overflow in the pointer offsets below. */
const size_t nooo = nocca * noo;
const size_t noOo = nocca * nOo;
const size_t nOoO = noccb * nOo;
const size_t nvoo = nvira * noo;
const int nVoO = nvirb * nOo;
int i, j, k, n;
/* Four GEMM contractions accumulate V (leading dim nocca or nOo). */
zgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvira,
&D2, vvop+nocca, &nmoa, t2abT+a*nOo, &nVoO,
&D0, v, &nocca);
zgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &nvirb,
&D2, vVoP+noccb, &nmob, t2abT+b*(size_t)nVoO, &nOo,
&D1, v, &nocca);
zgemm_(&TRANS_N, &TRANS_T, &nOo, &nocca, &nocca,
&D1, VoOo+a*noOo, &nOo, t2aaT+b*nvoo+c*noo, &nocca,
&D1, v, &nOo);
zgemm_(&TRANS_T, &TRANS_T, &nocca, &nOo, &noccb,
&D2, t2abT+b*(size_t)nVoO+a*nOo, &noccb, vOoO+c*nOoO, &nOo,
&D1, v, &nocca);
/* Copy V into W with the occupied axes reordered: w(i,j,k) = v(j,i,k). */
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
w[n] = v[j*nOo+i*nocca+k];
} } }
/* Two further contractions accumulate into W (leading dim noo). */
zgemm_(&TRANS_N, &TRANS_N, &noo, &noccb, &nvira,
&D1, t2aaT+b*nvoo, &noo, VvOp+nocca, &nmoa,
&D1, w, &noo);
zgemm_(&TRANS_N, &TRANS_T, &noo, &noccb, &nocca,
&D2, vooo+c*nooo, &noo, t2abT+b*(size_t)nVoO+a*nOo, &noccb,
&D1, w, &noo);
/* Pre-scaled T1/Fock slices for this (a,b) pair (small VLAs). */
double complex t1aT2[nocca];
double complex fvo2[nocca];
double complex fVOhalf[noccb];
for (i = 0; i < nocca; i++) {
t1aT2[i] = t1aT[b*nocca+i] * 2;
fvo2[i] = fvo[b*nocca+i] * 2;
}
for (i = 0; i < noccb; i++) {
fVOhalf[i] = fVO[a*noccb+i] * .5;
}
double complex *pt2aaT = t2aaT + b * nvoo + c * noo;
double complex *pt2abT = t2abT + (c*nvirb+a) * nOo;
/* Assemble V = W + T1*integral + Fock*T2 disconnected correction terms. */
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
v[n] = (w[n] + vvop[k*nmoa+j] * t1bT[a*noccb+i]
+ VvOp[i*nmoa+k] * t1aT2[j]
+ pt2aaT[j*nocca+k] * fVOhalf[i]
+ pt2abT[k*noccb+i] * fvo2[j]);
} } }
}
/*
* w - w.transpose(0,2,1)
*/
static void zpermute_baa(double complex *out, double complex *w, int nocca, int noccb)
{
int noo = nocca * nocca;
int n;
int i, j, k;
for (n = 0, i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++, n++) {
out[n] = w[i*noo+j*nocca+k] - w[i*noo+k*nocca+j];
} } }
}
static double complex
_zget_energy_baa(double complex *z0, double complex *z1,
double complex *w0, double complex *w1,
double *mo_ea, double *mo_eb, int nocca, int noccb,
int a, int b, int c, double fac)
{
int noo = nocca * nocca;
int i, j, k;
double abc = mo_eb[noccb+a] + mo_ea[nocca+b] + mo_ea[nocca+c];
double complex et = 0;
for (i = 0; i < noccb; i++) {
for (j = 0; j < nocca; j++) {
for (k = 0; k < nocca; k++) {
et += conj(z0[i*noo+j*nocca+k] + z1[i*noo+k*nocca+j])
* (w0[i*noo+j*nocca+k] + w1[i*noo+k*nocca+j])
* (fac / (mo_eb[i] + mo_ea[j] + mo_ea[k] - abc));
} } }
return et;
}
/*
 * Energy contribution of one (a,b,c) virtual triple in the beta-alpha-alpha
 * channel. cache1 supplies five back-to-back scratch buffers of nOoo
 * elements each (v0, v1, w0, w1, z0); the caller allocates 5*nOoo+1.
 */
static double complex
zcontract6_baa(int nocca, int noccb, int nvira, int nvirb,
int a, int b, int c,
double complex **vs_ts, void **cache, double complex *cache1)
{
int nOoo = noccb * nocca * nocca;
double complex *v0 = cache1;
double complex *v1 = v0 + nOoo;
double complex *w0 = v1 + nOoo;
double complex *w1 = w0 + nOoo;
double complex *z0 = w1 + nOoo;
/* z1 reuses v0's storage: safe only because the z0 permutation below
 * consumes v0 BEFORE z1 is written -- do not reorder the two calls. */
double complex *z1 = v0;
/* cache holds 3 integral blocks per (b,c) ordering: [0..2] and [3..5]. */
zget_wv_baa(w0, v0, vs_ts, ((double complex **)cache) , nocca, noccb, nvira, nvirb, a, b, c);
zget_wv_baa(w1, v1, vs_ts, ((double complex **)cache)+3, nocca, noccb, nvira, nvirb, a, c, b);
zpermute_baa(z0, v0, nocca, noccb);
zpermute_baa(z1, v1, nocca, noccb);
double *mo_ea = (double *)vs_ts[0];
double *mo_eb = (double *)vs_ts[1];
double complex et;
/* b == c is a degenerate alpha pair: weight its contribution by 1/2. */
if (b == c) {
et = _zget_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, .5);
} else {
et = _zget_energy_baa(z0, z1, w0, w1, mo_ea, mo_eb, nocca, noccb, a, b, c, 1.);
}
return et;
}
/*
 * Accumulate the beta-alpha-alpha UCCSD(T) energy contribution into *e_tot.
 * Jobs (one per virtual triple) are distributed over OpenMP threads; each
 * thread's partial sum is folded into *e_tot in a critical section.
 */
void CCuccsd_t_zbaa(double complex *e_tot,
                    double *mo_ea, double *mo_eb,
                    double complex *t1aT, double complex *t1bT,
                    double complex *t2aaT, double complex *t2abT,
                    double complex *vooo, double complex *vOoO, double complex *VoOo,
                    double complex *fvo, double complex *fVO,
                    int nocca, int noccb, int nvira, int nvirb,
                    int a0, int a1, int b0, int b1,
                    void *cache_row_a, void *cache_col_a,
                    void *cache_row_b, void *cache_col_b)
{
        int da = a1 - a0;
        int db = b1 - b0;
        /* da*db*b1 is an upper bound; gen_baa_jobs reports the real count. */
        CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
        size_t njobs = gen_baa_jobs(jobs, nocca, noccb, nvira, nvirb,
                                    a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b,
                                    sizeof(double complex));
        /* Bundle the shared arrays so zcontract6_baa takes a single pointer;
         * slots 0/1 carry the (real) orbital energies, cast back in callee. */
        double complex *vs_ts[] = {(double complex *)mo_ea,
                (double complex *)mo_eb, fvo, fVO, vooo, vOoO, VoOo,
                t1aT, t1bT, t2aaT, t2abT};
#pragma omp parallel default(none) \
        shared(njobs, nocca, noccb, nvira, nvirb, vs_ts, jobs, e_tot)
{
        int a, b, c;
        size_t k;
        /* Per-thread scratch: five noccb*nocca^2 buffers plus slack. */
        double complex *cache1 = malloc(sizeof(double complex) *
                                        (noccb*nocca*nocca*5+1));
        double complex e = 0;
#pragma omp for schedule (dynamic, 4)
        for (k = 0; k < njobs; k++) {
                a = jobs[k].a;
                b = jobs[k].b;
                c = jobs[k].c;
                e += zcontract6_baa(nocca, noccb, nvira, nvirb, a, b, c, vs_ts,
                                    jobs[k].cache, cache1);
        }
        free(cache1);
#pragma omp critical
        *e_tot += e;
}
        free(jobs);     /* fix: `jobs` was allocated but never freed (leak) */
}
|
Example_tasking.8.c | /*
* @@name: tasking.8c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_3.0
*/
/* OpenMP 3.0 spec example tasking.8: threadprivate storage inside tasks. */
int tp;
/* Every thread in a team keeps its own persistent copy of tp. */
#pragma omp threadprivate(tp)
int var;
void work()
{
#pragma omp parallel
{
/* do work here */
#pragma omp task
{
/* Increments the executing thread's threadprivate copy of tp. */
tp++;
/* do work here */
#pragma omp task
{
/* do work here but don't modify tp */
}
/* The nested task leaves tp alone, so this read observes the tp++ above. */
var = tp; //Value does not change after write above
}
}
}
|
QuEST_cpu.c | // Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details
/** @file
* The core of the CPU backend functionality. The CPU/MPI implementations of the pure state functions in
* ../QuEST_ops_pure.h are in QuEST_cpu_local.c and QuEST_cpu_distributed.c which mostly wrap the core
* functions defined here. Some additional hardware-agnostic functions are defined here
*/
# include "QuEST.h"
# include "QuEST_internal.h"
# include "QuEST_precision.h"
# include "mt19937ar.h"
# include "QuEST_cpu_internal.h"
# include <math.h>
# include <stdio.h>
# include <stdlib.h>
# include <assert.h>
# ifdef _OPENMP
# include <omp.h>
# endif
/** Get the value of the bit at a particular index in a number.
SCB edit: new definition of extractBit is much faster ***
* @param[in] locationOfBitFromRight location of bit in theEncodedNumber
* @param[in] theEncodedNumber number to search
* @return the value of the bit in theEncodedNumber
*/
static int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber)
{
    // Isolate the requested bit, then shift it down to position 0.
    const long long int bitMask = 1LL << locationOfBitFromRight;
    return (theEncodedNumber & bitMask) >> locationOfBitFromRight;
}
/** Scale by `retain` every amplitude of this chunk that is off-diagonal in
 * targetQubit -- the |..0..><..1..| and |..1..><..0..| density-matrix terms.
 * Elements diagonal in targetQubit are untouched. The local index is offset
 * by chunkId*numAmpsPerChunk to form the global index before masking. */
void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, const int targetQubit, qreal retain){
const long long int numTasks = qureg.numAmpsPerChunk;
// low and high copies of targetQubit's bit within the flattened row/col index
long long int innerMask = 1LL << targetQubit;
long long int outerMask = 1LL << (targetQubit + (qureg.numQubitsRepresented));
long long int thisTask;
long long int thisPattern;
long long int totMask = innerMask|outerMask;
# ifdef _OPENMP
# pragma omp parallel \
shared (innerMask,outerMask,totMask,qureg,retain) \
private (thisTask,thisPattern)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++){
thisPattern = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMask;
// exactly one of the two bits set => off-diagonal in targetQubit
if ((thisPattern==innerMask) || (thisPattern==outerMask)){
// do dephase
// the lines below will degrade the off-diagonal terms |..0..><..1..| and |..1..><..0..|
qureg.stateVec.real[thisTask] = retain*qureg.stateVec.real[thisTask];
qureg.stateVec.imag[thisTask] = retain*qureg.stateVec.imag[thisTask];
}
}
}
}
void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) {
    // Dephasing keeps a (1 - dephase) fraction of the off-diagonal
    // |..0..><..1..| terms; diagonal terms are untouched.
    densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, 1 - dephase);
}
void densmatr_oneQubitDampingDephase(Qureg qureg, const int targetQubit, qreal dephase) {
    // Amplitude damping attenuates off-diagonal terms by sqrt(1 - dephase).
    densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, sqrt(1 - dephase));
}
/** Two-qubit dephasing on this chunk: scale by retain = 1-dephase every
 * element that is off-diagonal in qubit1 OR in qubit2; elements diagonal in
 * both qubits are untouched. */
void densmatr_twoQubitDephase(Qureg qureg, const int qubit1, const int qubit2, qreal dephase) {
qreal retain=1-dephase;
const long long int numTasks = qureg.numAmpsPerChunk;
// low/high (ket/bra half) copies of each qubit's bit in the flattened index
long long int innerMaskQubit1 = 1LL << qubit1;
long long int outerMaskQubit1 = 1LL << (qubit1 + (qureg.numQubitsRepresented));
long long int innerMaskQubit2 = 1LL << qubit2;
long long int outerMaskQubit2 = 1LL << (qubit2 + (qureg.numQubitsRepresented));
long long int totMaskQubit1 = innerMaskQubit1|outerMaskQubit1;
long long int totMaskQubit2 = innerMaskQubit2|outerMaskQubit2;
long long int thisTask;
long long int thisPatternQubit1, thisPatternQubit2;
# ifdef _OPENMP
# pragma omp parallel \
shared (innerMaskQubit1,outerMaskQubit1,totMaskQubit1,innerMaskQubit2,outerMaskQubit2, \
totMaskQubit2,qureg,retain) \
private (thisTask,thisPatternQubit1,thisPatternQubit2)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++){
// mask the global index down to each qubit's pair of bits
thisPatternQubit1 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit1;
thisPatternQubit2 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit2;
// any mismatch |...0...><...1...| etc
if ( (thisPatternQubit1==innerMaskQubit1) || (thisPatternQubit1==outerMaskQubit1) ||
(thisPatternQubit2==innerMaskQubit2) || (thisPatternQubit2==outerMaskQubit2) ){
// do dephase
// the lines below will degrade the off-diagonal terms |..0..><..1..| and |..1..><..0..|
qureg.stateVec.real[thisTask] = retain*qureg.stateVec.real[thisTask];
qureg.stateVec.imag[thisTask] = retain*qureg.stateVec.imag[thisTask];
}
}
}
}
/** One-qubit depolarising applied entirely within this chunk.
 * Off-diagonal (in targetQubit) elements are scaled by retain = 1-depolLevel;
 * each diagonal pair (|..0..><..0..|, |..1..><..1..|) is mixed toward its
 * average with weight depolLevel. Only the iteration whose two mask bits are
 * both clear updates a pair, so each amplitude is written exactly once. */
void densmatr_oneQubitDepolariseLocal(Qureg qureg, const int targetQubit, qreal depolLevel) {
qreal retain=1-depolLevel;
const long long int numTasks = qureg.numAmpsPerChunk;
long long int innerMask = 1LL << targetQubit;
long long int outerMask = 1LL << (targetQubit + (qureg.numQubitsRepresented));
long long int totMask = innerMask|outerMask;
long long int thisTask;
long long int partner;
long long int thisPattern;
qreal realAv, imagAv;
# ifdef _OPENMP
# pragma omp parallel \
shared (innerMask,outerMask,totMask,qureg,retain,depolLevel) \
private (thisTask,partner,thisPattern,realAv,imagAv)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++){
thisPattern = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMask;
if ((thisPattern==innerMask) || (thisPattern==outerMask)){
// do dephase
// the lines below will degrade the off-diagonal terms |..0..><..1..| and |..1..><..0..|
qureg.stateVec.real[thisTask] = retain*qureg.stateVec.real[thisTask];
qureg.stateVec.imag[thisTask] = retain*qureg.stateVec.imag[thisTask];
} else {
// NOTE(review): pairing uses the LOCAL index while the pattern test above
// uses the global one -- valid only when each pair lies wholly in this
// chunk, which is what the "Local" variant presumes; confirm at call site.
if ((thisTask&totMask)==0){ //this element relates to targetQubit in state 0
// do depolarise
// this iteration writes both members of the pair; the partner's own
// iteration takes neither branch body, so there is no write race
partner = thisTask | totMask;
realAv = (qureg.stateVec.real[thisTask] + qureg.stateVec.real[partner]) /2 ;
imagAv = (qureg.stateVec.imag[thisTask] + qureg.stateVec.imag[partner]) /2 ;
qureg.stateVec.real[thisTask] = retain*qureg.stateVec.real[thisTask] + depolLevel*realAv;
qureg.stateVec.imag[thisTask] = retain*qureg.stateVec.imag[thisTask] + depolLevel*imagAv;
qureg.stateVec.real[partner] = retain*qureg.stateVec.real[partner] + depolLevel*realAv;
qureg.stateVec.imag[partner] = retain*qureg.stateVec.imag[partner] + depolLevel*imagAv;
}
}
}
}
}
/** One-qubit amplitude damping applied entirely within this chunk.
 * Off-diagonal (in targetQubit) elements are scaled by sqrt(1-damping);
 * for each diagonal pair, `damping` times the |..1..><..1..| population is
 * added to the |..0..><..0..| element and the former is scaled by retain. */
void densmatr_oneQubitDampingLocal(Qureg qureg, const int targetQubit, qreal damping) {
qreal retain=1-damping;
qreal dephase=sqrt(retain);
const long long int numTasks = qureg.numAmpsPerChunk;
long long int innerMask = 1LL << targetQubit;
long long int outerMask = 1LL << (targetQubit + (qureg.numQubitsRepresented));
long long int totMask = innerMask|outerMask;
long long int thisTask;
long long int partner;
long long int thisPattern;
//qreal realAv, imagAv;
# ifdef _OPENMP
# pragma omp parallel \
shared (innerMask,outerMask,totMask,qureg,retain,damping,dephase) \
private (thisTask,partner,thisPattern)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++){
thisPattern = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMask;
if ((thisPattern==innerMask) || (thisPattern==outerMask)){
// do dephase
// the lines below will degrade the off-diagonal terms |..0..><..1..| and |..1..><..0..|
qureg.stateVec.real[thisTask] = dephase*qureg.stateVec.real[thisTask];
qureg.stateVec.imag[thisTask] = dephase*qureg.stateVec.imag[thisTask];
} else {
// only the pair member with both mask bits clear performs the update,
// writing both elements exactly once (partner's iteration is a no-op)
if ((thisTask&totMask)==0){ //this element relates to targetQubit in state 0
// do depolarise
partner = thisTask | totMask;
//realAv = (qureg.stateVec.real[thisTask] + qureg.stateVec.real[partner]) /2 ;
//imagAv = (qureg.stateVec.imag[thisTask] + qureg.stateVec.imag[partner]) /2 ;
qureg.stateVec.real[thisTask] = qureg.stateVec.real[thisTask] + damping*qureg.stateVec.real[partner];
qureg.stateVec.imag[thisTask] = qureg.stateVec.imag[thisTask] + damping*qureg.stateVec.imag[partner];
qureg.stateVec.real[partner] = retain*qureg.stateVec.real[partner];
qureg.stateVec.imag[partner] = retain*qureg.stateVec.imag[partner];
}
}
}
}
}
/** Distributed half of one-qubit depolarising. First dephases the
 * off-diagonal terms via densmatr_oneQubitDephase, then mixes each diagonal
 * element with its pair amplitude: state = (1-p)*state + p*(state+pair)/2.
 * Assumes the caller has already exchanged the partner chunk into
 * qureg.pairStateVec -- TODO confirm against the distributed driver. */
void densmatr_oneQubitDepolariseDistributed(Qureg qureg, const int targetQubit, qreal depolLevel) {
// first do dephase part.
// TODO -- this might be more efficient to do at the same time as the depolarise if we move to
// iterating over all elements in the state vector for the purpose of vectorisation
// TODO -- if we keep this split, move this function to densmatr_oneQubitDepolarise()
densmatr_oneQubitDephase(qureg, targetQubit, depolLevel);
long long int sizeInnerBlock, sizeInnerHalfBlock;
long long int sizeOuterColumn, sizeOuterHalfColumn;
long long int thisInnerBlock, // current block
thisOuterColumn, // current column in density matrix
thisIndex, // current index in (density matrix representation) state vector
thisIndexInOuterColumn,
thisIndexInInnerBlock;
int outerBit;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1;
// set dimensions
sizeInnerHalfBlock = 1LL << targetQubit;
sizeInnerBlock = 2LL * sizeInnerHalfBlock;
sizeOuterColumn = 1LL << qureg.numQubitsRepresented;
sizeOuterHalfColumn = sizeOuterColumn >> 1;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeInnerBlock,sizeInnerHalfBlock,sizeOuterColumn,sizeOuterHalfColumn,qureg,depolLevel) \
private (thisTask,thisInnerBlock,thisOuterColumn,thisIndex,thisIndexInOuterColumn, \
thisIndexInInnerBlock,outerBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
// thisTask iterates over half the elements in this process' chunk of the density matrix
// treat this as iterating over all columns, then iterating over half the values
// within one column.
// If this function has been called, this process' chunk contains half an
// outer block or less
for (thisTask=0; thisTask<numTasks; thisTask++) {
// we want to process all columns in the density matrix,
// updating the values for half of each column (one half of each inner block)
thisOuterColumn = thisTask / sizeOuterHalfColumn;
thisIndexInOuterColumn = thisTask&(sizeOuterHalfColumn-1); // thisTask % sizeOuterHalfColumn
thisInnerBlock = thisIndexInOuterColumn/sizeInnerHalfBlock;
// get index in state vector corresponding to upper inner block
thisIndexInInnerBlock = thisTask&(sizeInnerHalfBlock-1); // thisTask % sizeInnerHalfBlock
thisIndex = thisOuterColumn*sizeOuterColumn + thisInnerBlock*sizeInnerBlock
+ thisIndexInInnerBlock;
// check if we are in the upper or lower half of an outer block
outerBit = extractBit(targetQubit, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId)>>qureg.numQubitsRepresented);
// if we are in the lower half of an outer block, shift to be in the lower half
// of the inner block as well (we want to dephase |0><0| and |1><1| only)
thisIndex += outerBit*(sizeInnerHalfBlock);
// NOTE: at this point thisIndex should be the index of the element we want to
// dephase in the chunk of the state vector on this process, in the
// density matrix representation.
// thisTask is the index of the pair element in pairStateVec
// state[thisIndex] = (1-depolLevel)*state[thisIndex] + depolLevel*(state[thisIndex]
// + pair[thisTask])/2
qureg.stateVec.real[thisIndex] = (1-depolLevel)*qureg.stateVec.real[thisIndex] +
depolLevel*(qureg.stateVec.real[thisIndex] + qureg.pairStateVec.real[thisTask])/2;
qureg.stateVec.imag[thisIndex] = (1-depolLevel)*qureg.stateVec.imag[thisIndex] +
depolLevel*(qureg.stateVec.imag[thisIndex] + qureg.pairStateVec.imag[thisTask])/2;
}
}
}
/** Distributed half of one-qubit amplitude damping. First attenuates the
 * off-diagonal terms by sqrt(1-damping), then updates each diagonal element:
 * those with targetQubit=0 receive damping * pair amplitude, those with
 * targetQubit=1 are scaled by retain = 1-damping. Assumes the partner chunk
 * was exchanged into qureg.pairStateVec by the caller -- TODO confirm. */
void densmatr_oneQubitDampingDistributed(Qureg qureg, const int targetQubit, qreal damping) {
qreal retain=1-damping;
qreal dephase=sqrt(1-damping);
// first do dephase part.
// TODO -- this might be more efficient to do at the same time as the depolarise if we move to
// iterating over all elements in the state vector for the purpose of vectorisation
// TODO -- if we keep this split, move this function to densmatr_oneQubitDepolarise()
densmatr_oneQubitDampingDephase(qureg, targetQubit, dephase);
long long int sizeInnerBlock, sizeInnerHalfBlock;
long long int sizeOuterColumn, sizeOuterHalfColumn;
long long int thisInnerBlock, // current block
thisOuterColumn, // current column in density matrix
thisIndex, // current index in (density matrix representation) state vector
thisIndexInOuterColumn,
thisIndexInInnerBlock;
int outerBit;
int stateBit;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1;
// set dimensions
sizeInnerHalfBlock = 1LL << targetQubit;
sizeInnerBlock = 2LL * sizeInnerHalfBlock;
sizeOuterColumn = 1LL << qureg.numQubitsRepresented;
sizeOuterHalfColumn = sizeOuterColumn >> 1;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeInnerBlock,sizeInnerHalfBlock,sizeOuterColumn,sizeOuterHalfColumn,qureg,damping, retain, dephase) \
private (thisTask,thisInnerBlock,thisOuterColumn,thisIndex,thisIndexInOuterColumn, \
thisIndexInInnerBlock,outerBit, stateBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
// thisTask iterates over half the elements in this process' chunk of the density matrix
// treat this as iterating over all columns, then iterating over half the values
// within one column.
// If this function has been called, this process' chunk contains half an
// outer block or less
for (thisTask=0; thisTask<numTasks; thisTask++) {
// we want to process all columns in the density matrix,
// updating the values for half of each column (one half of each inner block)
thisOuterColumn = thisTask / sizeOuterHalfColumn;
thisIndexInOuterColumn = thisTask&(sizeOuterHalfColumn-1); // thisTask % sizeOuterHalfColumn
thisInnerBlock = thisIndexInOuterColumn/sizeInnerHalfBlock;
// get index in state vector corresponding to upper inner block
thisIndexInInnerBlock = thisTask&(sizeInnerHalfBlock-1); // thisTask % sizeInnerHalfBlock
thisIndex = thisOuterColumn*sizeOuterColumn + thisInnerBlock*sizeInnerBlock
+ thisIndexInInnerBlock;
// check if we are in the upper or lower half of an outer block
outerBit = extractBit(targetQubit, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId)>>qureg.numQubitsRepresented);
// if we are in the lower half of an outer block, shift to be in the lower half
// of the inner block as well (we want to dephase |0><0| and |1><1| only)
thisIndex += outerBit*(sizeInnerHalfBlock);
// NOTE: at this point thisIndex should be the index of the element we want to
// dephase in the chunk of the state vector on this process, in the
// density matrix representation.
// thisTask is the index of the pair element in pairStateVec
// Extract state bit, is 0 if thisIndex corresponds to a state with 0 in the target qubit
// and is 1 if thisIndex corresponds to a state with 1 in the target qubit
stateBit = extractBit(targetQubit, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId));
// state[thisIndex] = (1-depolLevel)*state[thisIndex] + depolLevel*(state[thisIndex]
// + pair[thisTask])/2
if(stateBit == 0){
// |..0..><..0..| gains the damped population from its |..1..><..1..| pair
qureg.stateVec.real[thisIndex] = qureg.stateVec.real[thisIndex] +
damping*( qureg.pairStateVec.real[thisTask]);
qureg.stateVec.imag[thisIndex] = qureg.stateVec.imag[thisIndex] +
damping*( qureg.pairStateVec.imag[thisTask]);
} else{
// |..1..><..1..| population decays by the retain factor
qureg.stateVec.real[thisIndex] = retain*qureg.stateVec.real[thisIndex];
qureg.stateVec.imag[thisIndex] = retain*qureg.stateVec.imag[thisIndex];
}
}
}
}
// @TODO
/** Two-qubit depolarising applied entirely within this chunk, as three
 * sequential mixing sweeps: (1) mix each qubit1-diagonal pair, (2) mix each
 * qubit2-diagonal pair, (3) mix the cross pair and apply the overall gamma
 * scale. Correctness relies on the implicit barrier at the end of each
 * `omp for`, so a sweep completes before the next reads its results. */
void densmatr_twoQubitDepolariseLocal(Qureg qureg, int qubit1, int qubit2, qreal delta, qreal gamma) {
const long long int numTasks = qureg.numAmpsPerChunk;
long long int innerMaskQubit1 = 1LL << qubit1;
long long int outerMaskQubit1= 1LL << (qubit1 + qureg.numQubitsRepresented);
long long int totMaskQubit1 = innerMaskQubit1 | outerMaskQubit1;
long long int innerMaskQubit2 = 1LL << qubit2;
long long int outerMaskQubit2 = 1LL << (qubit2 + qureg.numQubitsRepresented);
long long int totMaskQubit2 = innerMaskQubit2 | outerMaskQubit2;
long long int thisTask;
long long int partner;
long long int thisPatternQubit1, thisPatternQubit2;
qreal real00, imag00;
# ifdef _OPENMP
# pragma omp parallel \
shared (totMaskQubit1,totMaskQubit2,qureg,delta,gamma) \
private (thisTask,partner,thisPatternQubit1,thisPatternQubit2,real00,imag00)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
//--------------------------------------- STEP ONE ---------------------
for (thisTask=0; thisTask<numTasks; thisTask++){
thisPatternQubit1 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit1;
thisPatternQubit2 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit2;
if ((thisPatternQubit1==0) && ((thisPatternQubit2==0)
|| (thisPatternQubit2==totMaskQubit2))){
//this element of form |...X...0...><...X...0...| for X either 0 or 1.
// only the qubit1=00 member mixes the pair, so each element is written once
partner = thisTask | totMaskQubit1;
real00 = qureg.stateVec.real[thisTask];
imag00 = qureg.stateVec.imag[thisTask];
qureg.stateVec.real[thisTask] = qureg.stateVec.real[thisTask]
+ delta*qureg.stateVec.real[partner];
qureg.stateVec.imag[thisTask] = qureg.stateVec.imag[thisTask]
+ delta*qureg.stateVec.imag[partner];
qureg.stateVec.real[partner] = qureg.stateVec.real[partner] + delta*real00;
qureg.stateVec.imag[partner] = qureg.stateVec.imag[partner] + delta*imag00;
}
}
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
//--------------------------------------- STEP TWO ---------------------
for (thisTask=0; thisTask<numTasks; thisTask++){
thisPatternQubit1 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit1;
thisPatternQubit2 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit2;
if ((thisPatternQubit2==0) && ((thisPatternQubit1==0)
|| (thisPatternQubit1==totMaskQubit1))){
//this element of form |...0...X...><...0...X...| for X either 0 or 1.
partner = thisTask | totMaskQubit2;
real00 = qureg.stateVec.real[thisTask];
imag00 = qureg.stateVec.imag[thisTask];
qureg.stateVec.real[thisTask] = qureg.stateVec.real[thisTask]
+ delta*qureg.stateVec.real[partner];
qureg.stateVec.imag[thisTask] = qureg.stateVec.imag[thisTask]
+ delta*qureg.stateVec.imag[partner];
qureg.stateVec.real[partner] = qureg.stateVec.real[partner] + delta*real00;
qureg.stateVec.imag[partner] = qureg.stateVec.imag[partner] + delta*imag00;
}
}
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
//--------------------------------------- STEP THREE ---------------------
for (thisTask=0; thisTask<numTasks; thisTask++){
thisPatternQubit1 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit1;
thisPatternQubit2 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit2;
if ((thisPatternQubit2==0) && ((thisPatternQubit1==0)
|| (thisPatternQubit1==totMaskQubit1))){
//this element of form |...0...X...><...0...X...| for X either 0 or 1.
// cross partner: qubit2 bits set, qubit1 bits flipped; gamma applied here only
partner = thisTask | totMaskQubit2;
partner = partner ^ totMaskQubit1;
real00 = qureg.stateVec.real[thisTask];
imag00 = qureg.stateVec.imag[thisTask];
qureg.stateVec.real[thisTask] = gamma * (qureg.stateVec.real[thisTask]
+ delta*qureg.stateVec.real[partner]);
qureg.stateVec.imag[thisTask] = gamma * (qureg.stateVec.imag[thisTask]
+ delta*qureg.stateVec.imag[partner]);
qureg.stateVec.real[partner] = gamma * (qureg.stateVec.real[partner]
+ delta*real00);
qureg.stateVec.imag[partner] = gamma * (qureg.stateVec.imag[partner]
+ delta*imag00);
}
}
}
}
/** Step one of two-qubit depolarising in isolation: mix each qubit1-diagonal
 * pair (identical to STEP ONE of densmatr_twoQubitDepolariseLocal). Used when
 * the remaining steps must run elsewhere -- presumably the distributed code
 * path; confirm against the distributed driver. */
void densmatr_twoQubitDepolariseLocalPart1(Qureg qureg, int qubit1, int qubit2, qreal delta) {
const long long int numTasks = qureg.numAmpsPerChunk;
long long int innerMaskQubit1 = 1LL << qubit1;
long long int outerMaskQubit1= 1LL << (qubit1 + qureg.numQubitsRepresented);
long long int totMaskQubit1 = innerMaskQubit1 | outerMaskQubit1;
long long int innerMaskQubit2 = 1LL << qubit2;
long long int outerMaskQubit2 = 1LL << (qubit2 + qureg.numQubitsRepresented);
long long int totMaskQubit2 = innerMaskQubit2 | outerMaskQubit2;
// correct for being in a particular chunk
//totMaskQubit2 = totMaskQubit2&(qureg.numAmpsPerChunk-1); // totMaskQubit2 % numAmpsPerChunk
long long int thisTask;
long long int partner;
long long int thisPatternQubit1, thisPatternQubit2;
qreal real00, imag00;
# ifdef _OPENMP
# pragma omp parallel \
shared (totMaskQubit1,totMaskQubit2,qureg,delta) \
private (thisTask,partner,thisPatternQubit1,thisPatternQubit2,real00,imag00)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
//--------------------------------------- STEP ONE ---------------------
for (thisTask=0; thisTask<numTasks; thisTask ++){
thisPatternQubit1 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit1;
thisPatternQubit2 = (thisTask+qureg.numAmpsPerChunk*qureg.chunkId)&totMaskQubit2;
if ((thisPatternQubit1==0) && ((thisPatternQubit2==0)
|| (thisPatternQubit2==totMaskQubit2))){
//this element of form |...X...0...><...X...0...| for X either 0 or 1.
// only the qubit1=00 member mixes the pair, so each element is written once
partner = thisTask | totMaskQubit1;
real00 = qureg.stateVec.real[thisTask];
imag00 = qureg.stateVec.imag[thisTask];
qureg.stateVec.real[thisTask] = qureg.stateVec.real[thisTask]
+ delta*qureg.stateVec.real[partner];
qureg.stateVec.imag[thisTask] = qureg.stateVec.imag[thisTask]
+ delta*qureg.stateVec.imag[partner];
qureg.stateVec.real[partner] = qureg.stateVec.real[partner] + delta*real00;
qureg.stateVec.imag[partner] = qureg.stateVec.imag[partner] + delta*imag00;
}
}
}
}
/** Distributed mixing step of two-qubit depolarising: for each diagonal
 * element (in both targetQubit and qubit2) of this chunk, add delta times the
 * exchanged pair amplitude and scale by gamma:
 *   state = gamma*(state + delta*pair).
 * As noted below, callers performing steps 1 or 2 must pass gamma = 1.
 * Assumes qureg.pairStateVec was filled by the caller -- TODO confirm. */
void densmatr_twoQubitDepolariseDistributed(Qureg qureg, const int targetQubit,
const int qubit2, qreal delta, qreal gamma) {
long long int sizeInnerBlockQ1, sizeInnerHalfBlockQ1;
long long int sizeInnerBlockQ2, sizeInnerHalfBlockQ2, sizeInnerQuarterBlockQ2;
long long int sizeOuterColumn, sizeOuterQuarterColumn;
long long int thisInnerBlockQ2,
thisOuterColumn, // current column in density matrix
thisIndex, // current index in (density matrix representation) state vector
thisIndexInOuterColumn,
thisIndexInInnerBlockQ1,
thisIndexInInnerBlockQ2,
thisInnerBlockQ1InInnerBlockQ2;
int outerBitQ1, outerBitQ2;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>2;
// set dimensions
sizeInnerHalfBlockQ1 = 1LL << targetQubit;
sizeInnerHalfBlockQ2 = 1LL << qubit2;
sizeInnerQuarterBlockQ2 = sizeInnerHalfBlockQ2 >> 1;
sizeInnerBlockQ2 = sizeInnerHalfBlockQ2 << 1;
sizeInnerBlockQ1 = 2LL * sizeInnerHalfBlockQ1;
sizeOuterColumn = 1LL << qureg.numQubitsRepresented;
sizeOuterQuarterColumn = sizeOuterColumn >> 2;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeInnerBlockQ1,sizeInnerHalfBlockQ1,sizeInnerBlockQ2,sizeInnerHalfBlockQ2,sizeInnerQuarterBlockQ2,\
sizeOuterColumn,sizeOuterQuarterColumn,qureg,delta,gamma) \
private (thisTask,thisInnerBlockQ2,thisInnerBlockQ1InInnerBlockQ2, \
thisOuterColumn,thisIndex,thisIndexInOuterColumn, \
thisIndexInInnerBlockQ1,thisIndexInInnerBlockQ2,outerBitQ1,outerBitQ2)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
// thisTask iterates over half the elements in this process' chunk of the density matrix
// treat this as iterating over all columns, then iterating over half the values
// within one column.
// If this function has been called, this process' chunk contains half an
// outer block or less
for (thisTask=0; thisTask<numTasks; thisTask++) {
// we want to process all columns in the density matrix,
// updating the values for half of each column (one half of each inner block)
thisOuterColumn = thisTask / sizeOuterQuarterColumn;
// thisTask % sizeOuterQuarterColumn
thisIndexInOuterColumn = thisTask&(sizeOuterQuarterColumn-1);
thisInnerBlockQ2 = thisIndexInOuterColumn / sizeInnerQuarterBlockQ2;
// thisTask % sizeInnerQuarterBlockQ2;
thisIndexInInnerBlockQ2 = thisTask&(sizeInnerQuarterBlockQ2-1);
thisInnerBlockQ1InInnerBlockQ2 = thisIndexInInnerBlockQ2 / sizeInnerHalfBlockQ1;
// thisTask % sizeInnerHalfBlockQ1;
thisIndexInInnerBlockQ1 = thisTask&(sizeInnerHalfBlockQ1-1);
// get index in state vector corresponding to upper inner block
thisIndex = thisOuterColumn*sizeOuterColumn + thisInnerBlockQ2*sizeInnerBlockQ2
+ thisInnerBlockQ1InInnerBlockQ2*sizeInnerBlockQ1 + thisIndexInInnerBlockQ1;
// check if we are in the upper or lower half of an outer block for Q1
outerBitQ1 = extractBit(targetQubit, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId)>>qureg.numQubitsRepresented);
// if we are in the lower half of an outer block, shift to be in the lower half
// of the inner block as well (we want to dephase |0><0| and |1><1| only)
thisIndex += outerBitQ1*(sizeInnerHalfBlockQ1);
// check if we are in the upper or lower half of an outer block for Q2
outerBitQ2 = extractBit(qubit2, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId)>>qureg.numQubitsRepresented);
// if we are in the lower half of an outer block, shift to be in the lower half
// of the inner block as well (we want to dephase |0><0| and |1><1| only)
thisIndex += outerBitQ2*(sizeInnerQuarterBlockQ2<<1);
// NOTE: at this point thisIndex should be the index of the element we want to
// dephase in the chunk of the state vector on this process, in the
// density matrix representation.
// thisTask is the index of the pair element in pairStateVec
// state[thisIndex] = (1-depolLevel)*state[thisIndex] + depolLevel*(state[thisIndex]
// + pair[thisTask])/2
// NOTE: must set gamma=1 if using this function for steps 1 or 2
qureg.stateVec.real[thisIndex] = gamma*(qureg.stateVec.real[thisIndex] +
delta*qureg.pairStateVec.real[thisTask]);
qureg.stateVec.imag[thisIndex] = gamma*(qureg.stateVec.imag[thisIndex] +
delta*qureg.pairStateVec.imag[thisTask]);
}
}
}
/** Part 3 of distributed two-qubit depolarising: mixes each local amplitude with the
 * pairStateVec element for which (my Q1 != pair Q1) AND (my Q2 != pair Q2), as stated
 * in the matching comment inside the loop. Each amplitude becomes
 *   gamma*(state[i] + delta*pair[j]).
 * NOTE(review): assumes the caller has already exchanged the relevant amplitudes into
 * qureg.pairStateVec and chosen delta/gamma for this depolarising step — confirm at call site.
 *
 * @param[in,out] qureg        density matrix (in statevector representation) to update
 * @param[in] targetQubit      first qubit (Q1) of the depolarising channel
 * @param[in] qubit2           second qubit (Q2) of the depolarising channel
 * @param[in] delta            mixing weight applied to the paired amplitude
 * @param[in] gamma            overall scale applied to the mixed amplitude
 */
void densmatr_twoQubitDepolariseQ1LocalQ2DistributedPart3(Qureg qureg, const int targetQubit,
        const int qubit2, qreal delta, qreal gamma) {
    long long int sizeInnerBlockQ1, sizeInnerHalfBlockQ1;
    long long int sizeInnerBlockQ2, sizeInnerHalfBlockQ2, sizeInnerQuarterBlockQ2;
    long long int sizeOuterColumn, sizeOuterQuarterColumn;
    long long int thisInnerBlockQ2,
    thisOuterColumn, // current column in density matrix
    thisIndex,    // current index in (density matrix representation) state vector
    thisIndexInPairVector,
    thisIndexInOuterColumn,
    thisIndexInInnerBlockQ1,
    thisIndexInInnerBlockQ2,
    thisInnerBlockQ1InInnerBlockQ2;
    int outerBitQ1, outerBitQ2;
    long long int thisTask;
    // one quarter of the chunk is visited (see comment above the loop)
    const long long int numTasks=qureg.numAmpsPerChunk>>2;
    // set dimensions
    sizeInnerHalfBlockQ1 = 1LL << targetQubit;
    sizeInnerHalfBlockQ2 = 1LL << qubit2;
    sizeInnerQuarterBlockQ2 = sizeInnerHalfBlockQ2 >> 1;
    sizeInnerBlockQ2 = sizeInnerHalfBlockQ2 << 1;
    sizeInnerBlockQ1 = 2LL * sizeInnerHalfBlockQ1;
    sizeOuterColumn = 1LL << qureg.numQubitsRepresented;
    sizeOuterQuarterColumn = sizeOuterColumn >> 2;
//# if 0
# ifdef _OPENMP
# pragma omp parallel \
    shared    (sizeInnerBlockQ1,sizeInnerHalfBlockQ1,sizeInnerBlockQ2,sizeInnerHalfBlockQ2,sizeInnerQuarterBlockQ2,\
                sizeOuterColumn,sizeOuterQuarterColumn,qureg,delta,gamma) \
    private   (thisTask,thisInnerBlockQ2,thisInnerBlockQ1InInnerBlockQ2, \
                thisOuterColumn,thisIndex,thisIndexInPairVector,thisIndexInOuterColumn, \
                thisIndexInInnerBlockQ1,thisIndexInInnerBlockQ2,outerBitQ1,outerBitQ2)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
//# endif
        // thisTask iterates over half the elements in this process' chunk of the density matrix
        // treat this as iterating over all columns, then iterating over half the values
        // within one column.
        // If this function has been called, this process' chunk contains half an
        // outer block or less
        for (thisTask=0; thisTask<numTasks; thisTask++) {
            // we want to process all columns in the density matrix,
            // updating the values for half of each column (one half of each inner block)
            thisOuterColumn = thisTask / sizeOuterQuarterColumn;
            // thisTask % sizeOuterQuarterColumn
            thisIndexInOuterColumn = thisTask&(sizeOuterQuarterColumn-1);
            thisInnerBlockQ2 = thisIndexInOuterColumn / sizeInnerQuarterBlockQ2;
            // thisTask % sizeInnerQuarterBlockQ2;
            thisIndexInInnerBlockQ2 = thisTask&(sizeInnerQuarterBlockQ2-1);
            thisInnerBlockQ1InInnerBlockQ2 = thisIndexInInnerBlockQ2 / sizeInnerHalfBlockQ1;
            // thisTask % sizeInnerHalfBlockQ1;
            thisIndexInInnerBlockQ1 = thisTask&(sizeInnerHalfBlockQ1-1);
            // get index in state vector corresponding to upper inner block
            thisIndex = thisOuterColumn*sizeOuterColumn + thisInnerBlockQ2*sizeInnerBlockQ2
                + thisInnerBlockQ1InInnerBlockQ2*sizeInnerBlockQ1 + thisIndexInInnerBlockQ1;
            // check if we are in the upper or lower half of an outer block for Q1
            outerBitQ1 = extractBit(targetQubit, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId)>>qureg.numQubitsRepresented);
            // if we are in the lower half of an outer block, shift to be in the lower half
            // of the inner block as well (we want to dephase |0><0| and |1><1| only)
            thisIndex += outerBitQ1*(sizeInnerHalfBlockQ1);
            // For part 3 we need to match elements such that (my Q1 != pair Q1) AND (my Q2 != pair Q2)
            // Find correct index in pairStateVector
            // (offset forwards when this elem is in the upper Q1 half, backwards otherwise)
            thisIndexInPairVector = thisTask + (1-outerBitQ1)*sizeInnerHalfBlockQ1*sizeOuterQuarterColumn -
                outerBitQ1*sizeInnerHalfBlockQ1*sizeOuterQuarterColumn;
            // check if we are in the upper or lower half of an outer block for Q2
            outerBitQ2 = extractBit(qubit2, (thisIndex+qureg.numAmpsPerChunk*qureg.chunkId)>>qureg.numQubitsRepresented);
            // if we are in the lower half of an outer block, shift to be in the lower half
            // of the inner block as well (we want to dephase |0><0| and |1><1| only)
            thisIndex += outerBitQ2*(sizeInnerQuarterBlockQ2<<1);
            // NOTE: at this point thisIndex should be the index of the element we want to
            // dephase in the chunk of the state vector on this process, in the
            // density matrix representation.
            // state[thisIndex] = (1-depolLevel)*state[thisIndex] + depolLevel*(state[thisIndex]
            // + pair[thisIndexInPairVector])/2
            qureg.stateVec.real[thisIndex] = gamma*(qureg.stateVec.real[thisIndex] +
                    delta*qureg.pairStateVec.real[thisIndexInPairVector]);
            qureg.stateVec.imag[thisIndex] = gamma*(qureg.stateVec.imag[thisIndex] +
                    delta*qureg.pairStateVec.imag[thisIndexInPairVector]);
        }
    }
}
/* Without nested parallelisation, only the outer most loops which call below are parallelised */
/** Zeroes the contiguous local amplitudes [startInd, startInd+numAmps). */
void zeroSomeAmps(Qureg qureg, long long int startInd, long long int numAmps) {
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int endInd = startInd + numAmps;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (long long int amp = startInd; amp < endInd; amp++) {
        re[amp] = 0;
        im[amp] = 0;
    }
}
/** Divides the contiguous local amplitudes [startInd, startInd+numAmps) by norm. */
void normaliseSomeAmps(Qureg qureg, qreal norm, long long int startInd, long long int numAmps) {
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int endInd = startInd + numAmps;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (long long int amp = startInd; amp < endInd; amp++) {
        re[amp] = re[amp] / norm;
        im[amp] = im[amp] / norm;
    }
}
/** Walks [startAmpInd, startAmpInd+numAmps) in double-blocks of 2*blockSize,
 * normalising one half of each double-block and zeroing the other.
 * normFirst selects whether the normalised half comes first or second.
 */
void alternateNormZeroingSomeAmpBlocks(
    Qureg qureg, qreal norm, int normFirst,
    long long int startAmpInd, long long int numAmps, long long int blockSize
) {
    long long int numDubBlocks = numAmps / (2*blockSize);
    long long int dubBlockInd;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (dubBlockInd = 0; dubBlockInd < numDubBlocks; dubBlockInd++) {
        long long int dubBlockStart = startAmpInd + dubBlockInd*2*blockSize;
        if (normFirst) {
            normaliseSomeAmps(qureg, norm, dubBlockStart, blockSize);    // |0><0|
            zeroSomeAmps(     qureg,       dubBlockStart + blockSize, blockSize);
        } else {
            zeroSomeAmps(     qureg,       dubBlockStart, blockSize);
            normaliseSomeAmps(qureg, norm, dubBlockStart + blockSize, blockSize); // |1><1|
        }
    }
}
/** Renorms (/prob) every | * outcome * >< * outcome * | state, setting all others to zero.
 *
 * Fix: the original used `return voidFunc(...);` — a return statement with an
 * expression is not permitted in a function returning void (ISO C11 6.8.6.4p1);
 * it compiled only as a compiler extension. Split into call + plain return.
 *
 * @param[in,out] qureg          density matrix to collapse
 * @param[in] measureQubit       qubit that was measured
 * @param[in] outcome            measured outcome (0 or 1)
 * @param[in] totalStateProb     probability of that outcome, used to renormalise
 */
void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal totalStateProb) {
    // only (global) indices (as bit sequence): '* outcome *(n+q) outcome *q are spared
    // where n = measureQubit, q = qureg.numQubitsRepresented.
    // We can thus step in blocks of 2^q+n, killing every second, and inside the others,
    // stepping in sub-blocks of 2^q, killing every second.
    // When outcome=1, we offset the start of these blocks by their size.
    long long int innerBlockSize = (1LL << measureQubit);
    long long int outerBlockSize = (1LL << (measureQubit + qureg.numQubitsRepresented));
    // Because there are 2^a number of nodes(/chunks), each node will contain 2^b number of blocks,
    // or each block will span 2^c number of nodes. Similarly for the innerblocks.
    long long int locNumAmps = qureg.numAmpsPerChunk;
    long long int globalStartInd = qureg.chunkId * locNumAmps;
    int innerBit = extractBit(measureQubit, globalStartInd);
    int outerBit = extractBit(measureQubit + qureg.numQubitsRepresented, globalStartInd);
    // If this chunk's amps are entirely inside an outer block
    if (locNumAmps <= outerBlockSize) {
        // if this is an undesired outer block, kill all elems
        if (outerBit != outcome) {
            zeroSomeAmps(qureg, 0, qureg.numAmpsPerChunk);
            return;
        }
        // otherwise, if this is a desired outer block, and also entirely an inner block
        if (locNumAmps <= innerBlockSize) {
            // and that inner block is undesired, kill all elems
            if (innerBit != outcome)
                zeroSomeAmps(qureg, 0, qureg.numAmpsPerChunk);
            // otherwise normalise all elems
            else
                normaliseSomeAmps(qureg, totalStateProb, 0, qureg.numAmpsPerChunk);
            return;
        }
        // otherwise this is a desired outer block which contains 2^a inner blocks; kill/renorm every second inner block
        alternateNormZeroingSomeAmpBlocks(
            qureg, totalStateProb, innerBit==outcome, 0, qureg.numAmpsPerChunk, innerBlockSize);
        return;
    }
    // Otherwise, this chunk's amps contain multiple outer blocks (and hence multiple inner blocks)
    long long int numOuterDoubleBlocks = locNumAmps / (2*outerBlockSize);
    long long int firstBlockInd;
    // alternate norming* and zeroing the outer blocks (with order based on the desired outcome)
    // These loops aren't parallelised, since they could have 1 or 2 iterations and will prevent
    // inner parallelisation
    if (outerBit == outcome) {
        for (long long int outerDubBlockInd = 0; outerDubBlockInd < numOuterDoubleBlocks; outerDubBlockInd++) {
            firstBlockInd = outerDubBlockInd*2*outerBlockSize;
            // *norm only the desired inner blocks in the desired outer block
            alternateNormZeroingSomeAmpBlocks(
                qureg, totalStateProb, innerBit==outcome,
                firstBlockInd, outerBlockSize, innerBlockSize);
            // zero the undesired outer block
            zeroSomeAmps(qureg, firstBlockInd + outerBlockSize, outerBlockSize);
        }
    } else {
        for (long long int outerDubBlockInd = 0; outerDubBlockInd < numOuterDoubleBlocks; outerDubBlockInd++) {
            firstBlockInd = outerDubBlockInd*2*outerBlockSize;
            // same thing but undesired outer blocks come first
            zeroSomeAmps(qureg, firstBlockInd, outerBlockSize);
            alternateNormZeroingSomeAmpBlocks(
                qureg, totalStateProb, innerBit==outcome,
                firstBlockInd + outerBlockSize, outerBlockSize, innerBlockSize);
        }
    }
}
/** Returns this node's contribution to Tr(rho^2): sum_i |rho_i|^2 over local amps. */
qreal densmatr_calcPurityLocal(Qureg qureg) {
    long long int numAmps = qureg.numAmpsPerChunk;
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    qreal trace = 0;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static) reduction (+:trace)
# endif
    for (amp = 0; amp < numAmps; amp++)
        trace += re[amp]*re[amp] + im[amp]*im[amp];
    return trace;
}
/** In-place mixes two density matrices: combine = (1-otherProb)*combine + otherProb*other.
 * Corresponding amplitudes live on the same node (the registers have equal dimensions).
 */
void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) {
    // unpack vars for OpenMP
    qreal *dstRe = combineQureg.stateVec.real;
    qreal *dstIm = combineQureg.stateVec.imag;
    qreal *srcRe = otherQureg.stateVec.real;
    qreal *srcIm = otherQureg.stateVec.imag;
    long long int numAmps = combineQureg.numAmpsPerChunk;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        // convex combination of the two amplitudes
        dstRe[amp] = dstRe[amp]*(1-otherProb) + otherProb*srcRe[amp];
        dstIm[amp] = dstIm[amp]*(1-otherProb) + otherProb*srcIm[amp];
    }
}
/** computes a few dens-columns-worth of (vec^*T) dens * vec
 *
 * Fix: row/col/dim/colsPerNode/startCol were declared `int`, but they index
 * amplitude arrays (`row + dim*col`), which overflows 32-bit int once the
 * density matrix exceeds ~2^31 local elements. Widened to long long int,
 * matching the amplitude-count types used throughout this file.
 *
 * @param[in] qureg      density matrix; its pairStateVec holds the full pure state
 * @param[in] pureState  statevector (only its dimensions are consulted here)
 * @return this node's (real) contribution to <pureState| qureg |pureState>
 */
qreal densmatr_calcFidelityLocal(Qureg qureg, Qureg pureState) {
    /* Here, elements of pureState are not accessed (instead grabbed from qureg.pair).
     * We only consult the attributes.
     *
     * qureg is a density matrix, and pureState is a statevector.
     * Every node contains as many columns of qureg as amps by pureState.
     * Ergo, this node contains columns:
     * qureg.chunkID * pureState.numAmpsPerChunk to
     * (qureg.chunkID + 1) * pureState.numAmpsPerChunk
     *
     * The first pureState.numAmpsTotal elements of qureg.pairStateVec are the
     * full pure state-vector
     */
    // unpack everything for OPENMP
    qreal* vecRe  = qureg.pairStateVec.real;
    qreal* vecIm  = qureg.pairStateVec.imag;
    qreal* densRe = qureg.stateVec.real;
    qreal* densIm = qureg.stateVec.imag;
    long long int row, col;
    long long int dim = pureState.numAmpsTotal;
    long long int colsPerNode = pureState.numAmpsPerChunk;
    qreal densElemRe, densElemIm;
    qreal prefacRe,  prefacIm;
    qreal rowSumRe,  rowSumIm;
    qreal vecElemRe, vecElemIm;
    // starting GLOBAL column index of the qureg columns on this node
    long long int startCol = qureg.chunkId * pureState.numAmpsPerChunk;
    // quantity computed by this node
    qreal globalSumRe = 0;   // imag-component is assumed zero
# ifdef _OPENMP
# pragma omp parallel \
    shared    (vecRe,vecIm,densRe,densIm, dim,colsPerNode,startCol) \
    private   (row,col, prefacRe,prefacIm, rowSumRe,rowSumIm, densElemRe,densElemIm, vecElemRe,vecElemIm) \
    reduction ( +:globalSumRe )
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        // indices of my GLOBAL row
        for (row=0; row < dim; row++) {
            // single element of conj(pureState)
            prefacRe =   vecRe[row];
            prefacIm = - vecIm[row];
            rowSumRe = 0;
            rowSumIm = 0;
            // indices of my LOCAL column
            for (col=0; col < colsPerNode; col++) {
                // my local density element (column-major within the chunk)
                densElemRe = densRe[row + dim*col];
                densElemIm = densIm[row + dim*col];
                // state-vector element
                vecElemRe = vecRe[startCol + col];
                vecElemIm = vecIm[startCol + col];
                // rowSum += dens * vec (complex multiply-accumulate)
                rowSumRe += densElemRe*vecElemRe - densElemIm*vecElemIm;
                rowSumIm += densElemRe*vecElemIm + densElemIm*vecElemRe;
            }
            // globalSum += conj(vec[row]) * rowSum (real part only)
            globalSumRe += rowSumRe*prefacRe - rowSumIm*prefacIm;
        }
    }
    return globalSumRe;
}
/** Returns this node's contribution to <bra|ket> = sum_i conj(bra_i) * ket_i. */
Complex statevec_calcInnerProductLocal(Qureg bra, Qureg ket) {
    long long int numAmps = bra.numAmpsPerChunk;
    qreal *braRe = bra.stateVec.real;
    qreal *braIm = bra.stateVec.imag;
    qreal *ketRe = ket.stateVec.real;
    qreal *ketIm = ket.stateVec.imag;
    qreal sumRe = 0;
    qreal sumIm = 0;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static) reduction (+:sumRe,sumIm)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        // conj(bra_i) * ket_i
        sumRe += braRe[amp]*ketRe[amp] + braIm[amp]*ketIm[amp];
        sumIm += braRe[amp]*ketIm[amp] - braIm[amp]*ketRe[amp];
    }
    Complex innerProd;
    innerProd.real = sumRe;
    innerProd.imag = sumIm;
    return innerProd;
}
/** Initialises the density matrix to the classical state |stateInd><stateInd|:
 * all local amplitudes are zeroed, then the node holding the diagonal element
 * (stateInd, stateInd) sets it to 1.
 */
void densmatr_initClassicalState (Qureg qureg, long long int stateInd)
{
    long long int numLocalAmps = qureg.numAmpsPerChunk;
    // can't use qureg.stateVec directly inside the OMP region
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        re[amp] = 0.0;
        im[amp] = 0.0;
    }
    // global index of the single non-zero (diagonal) element
    long long int densityDim = 1LL << qureg.numQubitsRepresented;
    long long int globalInd = (densityDim + 1)*stateInd;
    // give the specified classical state prob 1, on whichever node holds it
    if (qureg.chunkId == globalInd / numLocalAmps) {
        re[globalInd % numLocalAmps] = 1.0;
        im[globalInd % numLocalAmps] = 0.0;
    }
}
/** Initialises the density matrix to |+><+|: every element equals 1/2^N,
 * where N = qureg.numQubitsRepresented.
 */
void densmatr_initPlusState (Qureg qureg)
{
    // |+><+| = sum_i 1/sqrt(2^N) |i> 1/sqrt(2^N) <j| = sum_ij 1/2^N |i><j|
    long long int dim = (1LL << qureg.numQubitsRepresented);
    qreal probFactor = 1.0/((qreal) dim);
    long long int numLocalAmps = qureg.numAmpsPerChunk;
    // can't use qureg.stateVec directly inside the OMP region
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        re[amp] = probFactor;
        im[amp] = 0.0;
    }
}
/** Populates this node's columns of the density matrix with |psi><psi|, where the
 * full pure statevector psi lives in targetQureg.pairStateVec.
 *
 * Fix: element (row,col) must be a_row * conj(a_col) (the block's own comment says
 * "a_i conj(a_j) |i><j|"), but the bra amplitude was loaded UNconjugated and the
 * imaginary product used the wrong sign, producing a_row * a_col-like values and a
 * non-Hermitian matrix. Restored the conjugation (minus on braIm) and the matching
 * complex-product signs.
 *
 * @param[in,out] targetQureg  density matrix to fill; pairStateVec holds the pure state
 * @param[in] copyQureg        pure state register (only its dimensions are consulted)
 */
void densmatr_initPureStateLocal(Qureg targetQureg, Qureg copyQureg) {
    /* copyQureg amps aren't explicitly used - they're accessed through targetQureg.pair,
     * which contains the full pure statevector.
     * targetQureg has as many columns on node as copyQureg has amps
     */
    long long int colOffset = targetQureg.chunkId * copyQureg.numAmpsPerChunk;
    long long int colsPerNode = copyQureg.numAmpsPerChunk;
    long long int rowsPerNode = copyQureg.numAmpsTotal;
    // unpack vars for OpenMP
    qreal* vecRe  = targetQureg.pairStateVec.real;
    qreal* vecIm  = targetQureg.pairStateVec.imag;
    qreal* densRe = targetQureg.stateVec.real;
    qreal* densIm = targetQureg.stateVec.imag;
    long long int col, row, index;
    // a_i conj(a_j) |i><j|
    qreal ketRe, ketIm, braRe, braIm;
# ifdef _OPENMP
# pragma omp parallel \
    shared    (colOffset, colsPerNode,rowsPerNode, vecRe,vecIm,densRe,densIm) \
    private   (col,row, ketRe,ketIm,braRe,braIm, index)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        // local column
        for (col=0; col < colsPerNode; col++) {
            // global row
            for (row=0; row < rowsPerNode; row++) {
                // get pure state amps
                ketRe =   vecRe[row];
                ketIm =   vecIm[row];
                braRe =   vecRe[col + colOffset];
                braIm = - vecIm[col + colOffset];   // minus: bra amplitude is conjugated
                // update density matrix: (ketRe + i ketIm)(braRe + i braIm)
                index = row + col*rowsPerNode; // local ind
                densRe[index] = ketRe*braRe - ketIm*braIm;
                densIm[index] = ketRe*braIm + ketIm*braRe;
            }
        }
    }
}
/** Overwrites amplitudes [startInd, startInd+numAmps) (global indices) with the given
 * reals/imags. Runs on every node; each node clamps the requested window to its own
 * chunk and copies only the overlap (possibly none).
 */
void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) {
    long long int chunkStart = qureg.chunkId * qureg.numAmpsPerChunk;
    // local window of the given amplitudes; may initially lie outside [0, numAmpsPerChunk)
    long long int first = startInd - chunkStart;
    long long int last  = first + numAmps;          // exclusive
    // add this to a local index to get the corresponding elem in reals & imags
    long long int shift = chunkStart - startInd;
    // clamp the window to this chunk
    if (first < 0)
        first = 0;
    if (last > qureg.numAmpsPerChunk)
        last = qureg.numAmpsPerChunk;
    // after clamping, first >= last means no overlap and hence no iterations
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = first; amp < last; amp++) {
        re[amp] = reals[amp + shift];
        im[amp] = imags[amp + shift];
    }
}
/** Allocates and initialises the fields of a statevector Qureg for this node.
 *
 * Fixes: `1L << numQubits` overflows on platforms where long is 32 bits (LLP64
 * Windows) once numQubits >= 31 — use 1LL. Also NULL-initialises pairStateVec so
 * the struct never carries indeterminate pointers in single-rank runs.
 *
 * @param[out] qureg     register to populate (amplitudes uninitialised)
 * @param[in]  numQubits number of qubits; total amps = 2^numQubits
 * @param[in]  env       execution environment (rank count and this node's rank)
 */
void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env)
{
    long long int numAmps = 1LL << numQubits;
    long long int numAmpsPerRank = numAmps/env.numRanks;
    qureg->stateVec.real = malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.real)));
    qureg->stateVec.imag = malloc(numAmpsPerRank * sizeof(*(qureg->stateVec.imag)));
    // pair state vector is only needed when distributed over multiple ranks
    qureg->pairStateVec.real = NULL;
    qureg->pairStateVec.imag = NULL;
    if (env.numRanks>1){
        qureg->pairStateVec.real = malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.real)));
        qureg->pairStateVec.imag = malloc(numAmpsPerRank * sizeof(*(qureg->pairStateVec.imag)));
    }
    // abort on allocation failure (numAmpsPerRank==0 would make NULL legitimate)
    if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag))
            && numAmpsPerRank ) {
        printf("Could not allocate memory!");
        exit (EXIT_FAILURE);
    }
    if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag))
            && numAmpsPerRank ) {
        printf("Could not allocate memory!");
        exit (EXIT_FAILURE);
    }
    qureg->numQubitsInStateVec = numQubits;
    qureg->numAmpsTotal = numAmps;
    qureg->numAmpsPerChunk = numAmpsPerRank;
    qureg->chunkId = env.rank;
    qureg->numChunks = env.numRanks;
    qureg->isDensityMatrix = 0;
}
/** Frees the amplitude arrays of a Qureg (and the pair buffers when distributed).
 * NOTE: qureg is received by value, so the field resets below do not reach the
 * caller's copy — they are purely defensive within this function.
 */
void statevec_destroyQureg(Qureg qureg, QuESTEnv env){
    free(qureg.stateVec.real);
    free(qureg.stateVec.imag);
    qureg.stateVec.real = NULL;
    qureg.stateVec.imag = NULL;
    // pair buffers are only allocated for multi-rank runs
    if (env.numRanks > 1) {
        free(qureg.pairStateVec.real);
        free(qureg.pairStateVec.imag);
    }
    qureg.pairStateVec.real = NULL;
    qureg.pairStateVec.imag = NULL;
    qureg.numQubitsInStateVec = 0;
    qureg.numAmpsTotal = 0;
    qureg.numAmpsPerChunk = 0;
}
/** Prints every local amplitude to stdout, one "real, imag" pair per line, rank by
 * rank (ranks take turns via syncQuESTEnv so output is ordered). Refuses registers
 * of more than 5 qubits. When reportRank is set, each rank prints its own header;
 * otherwise a single shared header/footer is printed by the first/last rank.
 */
void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){
    long long int index;
    int rank;
    if (qureg.numQubitsInStateVec<=5){
        // each iteration lets exactly one rank print, all others wait at the sync
        for (rank=0; rank<qureg.numChunks; rank++){
            if (qureg.chunkId==rank){
                if (reportRank) {
                    printf("Reporting state from rank %d [\n", qureg.chunkId);
                    printf("real, imag\n");
                } else if (rank==0) {
                    printf("Reporting state [\n");
                    printf("real, imag\n");
                }
                for(index=0; index<qureg.numAmpsPerChunk; index++){
                    //printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.pairStateVec.real[index], qureg.pairStateVec.imag[index]);
                    printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]);
                }
                if (reportRank || rank==qureg.numChunks-1) printf("]\n");
            }
            syncQuESTEnv(env);
        }
    } else printf("Error: reportStateToScreen will not print output for systems of more than 5 qubits.\n");
}
/** Writes a short environment descriptor ("<q>qubits_CPU_<r>ranksx<t>threads")
 * into the caller's 200-char buffer.
 *
 * Fix: replaced unbounded sprintf with snprintf so the write can never overrun
 * the documented 200-byte buffer (output is unchanged whenever it fits, which
 * all realistic qubit/rank/thread counts do).
 */
void statevec_getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){
    int numThreads=1;
# ifdef _OPENMP
    numThreads=omp_get_max_threads();
# endif
    snprintf(str, 200, "%dqubits_CPU_%dranksx%dthreads", qureg.numQubitsInStateVec, env.numRanks, numThreads);
}
/** Initialises the register to |000...0>: zeroes every local amplitude, then the
 * node holding global amplitude 0 sets it to 1.
 */
void statevec_initZeroState (Qureg qureg)
{
    long long int numLocalAmps = qureg.numAmpsPerChunk;
    // can't use qureg.stateVec directly inside the OMP region
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        re[amp] = 0.0;
        im[amp] = 0.0;
    }
    // zero state |0000..0000> has probability 1; amp 0 lives on the first chunk
    if (qureg.chunkId == 0) {
        re[0] = 1.0;
        im[0] = 0.0;
    }
}
/** Initialises the register to |++..+>: every amplitude becomes 1/sqrt(2^N). */
void statevec_initPlusState (Qureg qureg)
{
    long long int numLocalAmps = qureg.numAmpsPerChunk;
    long long int numTotalAmps = numLocalAmps * qureg.numChunks;
    qreal normFactor = 1.0/sqrt((qreal)numTotalAmps);
    // can't use qureg.stateVec directly inside the OMP region
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        re[amp] = normFactor;
        im[amp] = 0.0;
    }
}
/** Initialises the register to the classical basis state |stateInd>: zeroes all
 * local amplitudes, then the node holding the global index sets it to 1.
 */
void statevec_initClassicalState (Qureg qureg, long long int stateInd)
{
    long long int numLocalAmps = qureg.numAmpsPerChunk;
    // can't use qureg.stateVec directly inside the OMP region
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        re[amp] = 0.0;
        im[amp] = 0.0;
    }
    // give the specified classical state prob 1, on whichever node holds it
    if (qureg.chunkId == stateInd/numLocalAmps) {
        re[stateInd % numLocalAmps] = 1.0;
        im[stateInd % numLocalAmps] = 0.0;
    }
}
/** Copies copyQureg's amplitudes into targetQureg. The registers are equal sized,
 * so every node holds corresponding partitions of both statevectors.
 */
void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) {
    long long int numLocalAmps = targetQureg.numAmpsPerChunk;
    // can't use qureg.stateVec directly inside the OMP region
    qreal *dstRe = targetQureg.stateVec.real;
    qreal *dstIm = targetQureg.stateVec.imag;
    qreal *srcRe = copyQureg.stateVec.real;
    qreal *srcIm = copyQureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        dstRe[amp] = srcRe[amp];
        dstIm[amp] = srcIm[amp];
    }
}
/**
* Initialise the state vector of probability amplitudes such that one qubit is set to 'outcome' and all other qubits are in an equal superposition of zero and one.
* @param[in,out] qureg object representing the set of qubits to be initialised
* @param[in] qubitId id of qubit to set to state 'outcome'
 * @param[in] outcome value (0 or 1) to which qubit 'qubitId' is set
*/
/** Sets the state to an equal superposition of every basis state whose bit
 * 'qubitId' equals 'outcome', with all other amplitudes zero.
 */
void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome)
{
    long long int numLocalAmps = qureg->numAmpsPerChunk;
    long long int numTotalAmps = numLocalAmps * qureg->numChunks;
    const long long int chunkId = qureg->chunkId;
    // exactly half of all basis states satisfy bit==outcome, hence the /2.0
    qreal normFactor = 1.0/sqrt((qreal)numTotalAmps/2.0);
    // can't use qureg->stateVec directly inside the OMP region
    qreal *re = qureg->stateVec.real;
    qreal *im = qureg->stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        // test the qubit's bit in the GLOBAL index of this amplitude
        int bit = extractBit(qubitId, amp + chunkId*numLocalAmps);
        re[amp] = (bit==outcome)? normFactor : 0.0;
        im[amp] = 0.0;
    }
}
/**
* Initialise the state vector of probability amplitudes to an (unphysical) state with
 * each component of each probability amplitude a unique floating point value, for debugging purposes.
* @param[in,out] qureg object representing the set of qubits to be initialised
*/
/** Fills the register with a deterministic (unphysical) debug pattern: the global
 * amplitude k becomes (2k/10) + i*((2k+1)/10).
 */
void statevec_initStateDebug (Qureg qureg)
{
    long long int numLocalAmps = qureg.numAmpsPerChunk;
    // global index of this node's first amplitude
    long long int globalOffset = numLocalAmps * qureg.chunkId;
    // can't use qureg.stateVec directly inside the OMP region
    qreal *re = qureg.stateVec.real;
    qreal *im = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel for schedule (static)
# endif
    for (amp = 0; amp < numLocalAmps; amp++) {
        re[amp] = ((globalOffset + amp)*2.0)/10.0;
        im[amp] = ((globalOffset + amp)*2.0+1.0)/10.0;
    }
}
// returns 1 if successful, else 0
/** Loads amplitudes from a text file of "real, imag" lines (lines starting with '#'
 * are skipped). Every rank opens the file in turn (serialised via syncQuESTEnv) and
 * keeps only the lines falling in its own chunk.
 * Returns 1 on success, 0 if the file could not be opened.
 * NOTE(review): on open failure a rank returns 0 WITHOUT reaching the remaining
 * syncQuESTEnv calls — if syncQuESTEnv is a collective barrier, other ranks could
 * block; confirm against the MPI implementation of syncQuESTEnv.
 */
int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){
    long long int chunkSize, stateVecSize;
    long long int indexInChunk, totalIndex;
    chunkSize = qureg->numAmpsPerChunk;
    stateVecSize = chunkSize*qureg->numChunks;
    qreal *stateVecReal = qureg->stateVec.real;
    qreal *stateVecImag = qureg->stateVec.imag;
    FILE *fp;
    char line[200];
    // ranks take turns reading the whole file, each keeping only its own lines
    for (int rank=0; rank<(qureg->numChunks); rank++){
        if (rank==qureg->chunkId){
            fp = fopen(filename, "r");
            // indicate file open failure
            if (fp == NULL)
                return 0;
            indexInChunk = 0; totalIndex = 0;
            while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){
                if (line[0]!='#'){
                    // totalIndex counts amplitude lines across the whole file;
                    // keep only those that land in this rank's chunk
                    int chunkId = totalIndex/chunkSize;
                    if (chunkId==qureg->chunkId){
                        // scan format must match the qreal precision (float/double/long double)
# if QuEST_PREC==1
                        sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]),
                                &(stateVecImag[indexInChunk]));
# elif QuEST_PREC==2
                        sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]),
                                &(stateVecImag[indexInChunk]));
# elif QuEST_PREC==4
                        sscanf(line, "%Lf, %Lf", &(stateVecReal[indexInChunk]),
                                &(stateVecImag[indexInChunk]));
# endif
                        indexInChunk += 1;
                    }
                    totalIndex += 1;
                }
            }
            fclose(fp);
        }
        syncQuESTEnv(env);
    }
    // indicate success
    return 1;
}
/** Returns 1 if every local amplitude of mq1 and mq2 agrees within 'precision'
 * (component-wise absolute difference), else 0.
 *
 * Fix: chunkSize (and the loop index) were `int`, truncating
 * numAmpsPerChunk (a long long) for registers with >= 2^31 local amplitudes;
 * widened both to long long int.
 */
int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){
    qreal diff;
    long long int chunkSize = mq1.numAmpsPerChunk;
    for (long long int i=0; i<chunkSize; i++){
        diff = absReal(mq1.stateVec.real[i] - mq2.stateVec.real[i]);
        if (diff>precision) return 0;
        diff = absReal(mq1.stateVec.imag[i] - mq2.stateVec.imag[i]);
        if (diff>precision) return 0;
    }
    return 1;
}
/** Applies the compact single-qubit unitary
 *     [ alpha        -conj(beta) ]
 *     [ beta          conj(alpha)]
 * to targetQubit, entirely within this node's chunk: amplitudes are visited in
 * pairs (indexUp, indexLo) that differ only in the target qubit's bit.
 *
 * @param[in,out] qureg        register to update
 * @param[in] targetQubit      qubit to rotate (0 is least significant)
 * @param[in] alpha, beta      complex parameters of the unitary (see matrix above)
 */
void statevec_compactUnitaryLocal (Qureg qureg, const int targetQubit, Complex alpha, Complex beta)
{
    long long int sizeBlock, sizeHalfBlock;
    long long int thisBlock, // current block
        indexUp,indexLo;     // current index and corresponding index in lower half block
    qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
    long long int thisTask;
    // each task updates one (up,lo) pair, hence half the chunk's amps
    const long long int numTasks=qureg.numAmpsPerChunk>>1;
    // set dimensions
    sizeHalfBlock = 1LL << targetQubit;
    sizeBlock     = 2LL * sizeHalfBlock;
    // Can't use qureg.stateVec as a private OMP var
    qreal *stateVecReal = qureg.stateVec.real;
    qreal *stateVecImag = qureg.stateVec.imag;
    qreal alphaImag=alpha.imag, alphaReal=alpha.real;
    qreal betaImag=beta.imag, betaReal=beta.real;
# ifdef _OPENMP
# pragma omp parallel \
    shared    (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag, alphaReal,alphaImag, betaReal,betaImag) \
    private   (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,stateRealLo,stateImagLo)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (thisTask=0; thisTask<numTasks; thisTask++) {
            // map the task number to the upper index of a pair
            thisBlock   = thisTask / sizeHalfBlock;
            indexUp     = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
            indexLo     = indexUp + sizeHalfBlock;
            // store current state vector values in temp variables
            stateRealUp = stateVecReal[indexUp];
            stateImagUp = stateVecImag[indexUp];
            stateRealLo = stateVecReal[indexLo];
            stateImagLo = stateVecImag[indexLo];
            // state[indexUp] = alpha * state[indexUp] - conj(beta)  * state[indexLo]
            stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
                - betaReal*stateRealLo - betaImag*stateImagLo;
            stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
                - betaReal*stateImagLo + betaImag*stateRealLo;
            // state[indexLo] = beta  * state[indexUp] + conj(alpha) * state[indexLo]
            stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
                + alphaReal*stateRealLo + alphaImag*stateImagLo;
            stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
                + alphaReal*stateImagLo - alphaImag*stateRealLo;
        }
    }
}
/** Applies a general 2x2 unitary u to targetQubit, entirely within this node's
 * chunk: each (indexUp, indexLo) amplitude pair (differing only in the target
 * qubit's bit) is multiplied by u.
 *
 * @param[in,out] qureg        register to update
 * @param[in] targetQubit      qubit to rotate (0 is least significant)
 * @param[in] u                unitary matrix to apply (row r, column c as u.rRcC)
 */
void statevec_unitaryLocal(Qureg qureg, const int targetQubit, ComplexMatrix2 u)
{
    long long int sizeBlock, sizeHalfBlock;
    long long int thisBlock, // current block
        indexUp,indexLo;     // current index and corresponding index in lower half block
    qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
    long long int thisTask;
    // each task updates one (up,lo) pair, hence half the chunk's amps
    const long long int numTasks=qureg.numAmpsPerChunk>>1;
    // set dimensions
    sizeHalfBlock = 1LL << targetQubit;
    sizeBlock     = 2LL * sizeHalfBlock;
    // Can't use qureg.stateVec as a private OMP var
    qreal *stateVecReal = qureg.stateVec.real;
    qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
    shared    (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag, u) \
    private   (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,stateRealLo,stateImagLo)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (thisTask=0; thisTask<numTasks; thisTask++) {
            // map the task number to the upper index of a pair
            thisBlock   = thisTask / sizeHalfBlock;
            indexUp     = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
            indexLo     = indexUp + sizeHalfBlock;
            // store current state vector values in temp variables
            stateRealUp = stateVecReal[indexUp];
            stateImagUp = stateVecImag[indexUp];
            stateRealLo = stateVecReal[indexLo];
            stateImagLo = stateVecImag[indexLo];
            // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
            stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
                + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
            stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
                + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
            // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
            stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
                + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
            stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
                + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
        }
    }
}
/** Rotate a single qubit in the state vector of probability amplitudes,
* given two complex numbers alpha and beta,
* and a subset of the state vector with upper and lower block values stored seperately.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] rot1 rotation angle
* @param[in] rot2 rotation angle
* @param[in] stateVecUp probability amplitudes in upper half of a block
* @param[in] stateVecLo probability amplitudes in lower half of a block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Apply the compact-unitary update to this chunk's section of the state vector.
 * The paired upper/lower block amplitudes arrive in separate arrays; each output
 * amplitude is out = rot1*up + conj(rot2)*lo (complex arithmetic expanded below).
 * targetQubit is not read here — the pairing was resolved by the caller. */
void statevec_compactUnitaryDistributed (Qureg qureg, const int targetQubit,
Complex rot1, Complex rot2,
ComplexArray stateVecUp,
ComplexArray stateVecLo,
ComplexArray stateVecOut)
{
const long long int numAmps = qureg.numAmpsPerChunk;
// unpack complex scalars and array pointers for use inside the OMP region
qreal rot1Real=rot1.real, rot1Imag=rot1.imag;
qreal rot2Real=rot2.real, rot2Imag=rot2.imag;
qreal *upRe=stateVecUp.real, *upIm=stateVecUp.imag;
qreal *loRe=stateVecLo.real, *loIm=stateVecLo.imag;
qreal *outRe=stateVecOut.real, *outIm=stateVecOut.imag;
long long int amp;
# ifdef _OPENMP
# pragma omp parallel \
shared (upRe,upIm, loRe,loIm, outRe,outIm, rot1Real,rot1Imag, rot2Real,rot2Imag) \
private (amp)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (amp=0; amp<numAmps; amp++) {
// capture both halves of the pair before writing the output
const qreal aRe = upRe[amp], aIm = upIm[amp];
const qreal bRe = loRe[amp], bIm = loIm[amp];
// out = rot1 * up + conj(rot2) * lo
outRe[amp] = rot1Real*aRe - rot1Imag*aIm + rot2Real*bRe + rot2Imag*bIm;
outIm[amp] = rot1Real*aIm + rot1Imag*aRe + rot2Real*bIm - rot2Imag*bRe;
}
}
}
/** Apply a unitary operation to a single qubit
* given a subset of the state vector with upper and lower block values
* stored seperately.
*
* @remarks Qubits are zero-based and the first qubit is the rightmost
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] rot1 first row-entry of the unitary, multiplying the upper-half amplitudes
* @param[in] rot2 second row-entry of the unitary, multiplying the lower-half amplitudes
* @param[in] stateVecUp probability amplitudes in upper half of a block
* @param[in] stateVecLo probability amplitudes in lower half of a block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Apply one row of a general 2x2 unitary to this chunk's amplitudes:
 * computes out = rot1*up + rot2*lo (complex arithmetic expanded below).
 * The paired upper/lower block amplitudes arrive in separate arrays;
 * targetQubit is not read here (pairing was resolved by the caller). */
void statevec_unitaryDistributed (Qureg qureg, const int targetQubit,
Complex rot1, Complex rot2,
ComplexArray stateVecUp,
ComplexArray stateVecLo,
ComplexArray stateVecOut)
{
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk;
// unpack complex scalars and array pointers for use inside the OMP region
qreal rot1Real=rot1.real, rot1Imag=rot1.imag;
qreal rot2Real=rot2.real, rot2Imag=rot2.imag;
qreal *stateVecRealUp=stateVecUp.real, *stateVecImagUp=stateVecUp.imag;
qreal *stateVecRealLo=stateVecLo.real, *stateVecImagLo=stateVecLo.imag;
qreal *stateVecRealOut=stateVecOut.real, *stateVecImagOut=stateVecOut.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (stateVecRealUp,stateVecImagUp,stateVecRealLo,stateVecImagLo,stateVecRealOut,stateVecImagOut, \
rot1Real, rot1Imag, rot2Real, rot2Imag) \
private (thisTask,stateRealUp,stateImagUp,stateRealLo,stateImagLo)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
// store current state vector values in temp variables
stateRealUp = stateVecRealUp[thisTask];
stateImagUp = stateVecImagUp[thisTask];
stateRealLo = stateVecRealLo[thisTask];
stateImagLo = stateVecImagLo[thisTask];
// Re(rot1*up + rot2*lo)
stateVecRealOut[thisTask] = rot1Real*stateRealUp - rot1Imag*stateImagUp
+ rot2Real*stateRealLo - rot2Imag*stateImagLo;
// Im(rot1*up + rot2*lo)
stateVecImagOut[thisTask] = rot1Real*stateImagUp + rot1Imag*stateRealUp
+ rot2Real*stateImagLo + rot2Imag*stateRealLo;
}
}
}
/** Apply the compact unitary {{alpha, -conj(beta)}, {beta, conj(alpha)}} to
 * targetQubit for every local amplitude pair whose controlQubit bit is 1.
 * The iteration is arranged so the inner loop runs over a contiguous span of
 * 2^pos amplitudes, which is amenable to SIMD vectorisation.
 *
 * Fixes vs previous version: the `omp simd` pragma is now guarded by
 * `#ifdef _OPENMP` like every other OpenMP pragma in this file, and the
 * scalar temporaries are declared inside the simd loop body so each lane
 * gets private copies (previously they were shared across lanes per OpenMP
 * data-sharing defaults). */
void statevec_controlledCompactUnitaryLocal (Qureg qureg, const int controlQubit, const int targetQubit,
Complex alpha, Complex beta)
{
// iterate in blocks aligned to the smaller of the two qubit strides
int pos = controlQubit;
if(pos > targetQubit) pos = targetQubit;
long long int numTasks = 1LL << (qureg.numQubitsRepresented - pos);
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
for(long long int task = 0; task < numTasks; ++task) {
long long int p_st = task << pos,
pup_st = (task << pos | (1LL << targetQubit));
// ensure [controlQubit]=1 and [targetQubit]=0
if(!(p_st >> controlQubit & 1) || (p_st >> targetQubit & 1)) continue;
long long int numSubtasks = 1LL << pos;
#ifdef _OPENMP
#pragma omp simd
#endif
for(long long int i = 0; i < numSubtasks; ++i) {
long long int indexUp = p_st + i, indexLo = pup_st + i;
// declared inside the simd loop so each lane has private temporaries
qreal stateRealUp = stateVecReal[indexUp];
qreal stateImagUp = stateVecImag[indexUp];
qreal stateRealLo = stateVecReal[indexLo];
qreal stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
}
/** Older block-iteration implementation of the controlled compact unitary
 * {{alpha, -conj(beta)}, {beta, conj(alpha)}}: walks amplitude pairs
 * (indexUp, indexLo) and applies the rotation only where the control bit
 * of the GLOBAL amplitude index is 1; other pairs are left unmodified. */
void statevec_controlledCompactUnitaryLocalOld (Qureg qureg, const int controlQubit, const int targetQubit,
Complex alpha, Complex beta)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
int controlBit;
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
qreal alphaImag=alpha.imag, alphaReal=alpha.real;
qreal betaImag=beta.imag, betaReal=beta.real;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag, alphaReal,alphaImag, betaReal,betaImag) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,stateRealLo,stateImagLo,controlBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// chunkId*chunkSize converts the chunk-local index to a global one
controlBit = extractBit (controlQubit, indexUp+chunkId*chunkSize);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp
- betaReal*stateRealLo - betaImag*stateImagLo;
stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp
- betaReal*stateImagLo + betaImag*stateRealLo;
// state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo]
stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp
+ alphaReal*stateRealLo + alphaImag*stateImagLo;
stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp
+ alphaReal*stateImagLo - alphaImag*stateRealLo;
}
}
}
}
/** Apply the 2x2 unitary u to targetQubit on the local chunk, but only for
 * amplitude pairs whose GLOBAL index has every bit of `mask` set (i.e. all
 * control qubits are 1). Other pairs are left unmodified. */
void statevec_multiControlledUnitaryLocal(Qureg qureg, const int targetQubit,
long long int mask, ComplexMatrix2 u)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag, u, mask) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,stateRealLo,stateImagLo)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// all mask bits must be set in the GLOBAL index of this pair
if (mask == (mask & (indexUp+chunkId*chunkSize)) ){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
}
}
/** Apply the 2x2 unitary u to targetQubit on the local chunk, but only for
 * amplitude pairs whose GLOBAL index has controlQubit set. Other pairs are
 * left unmodified. */
void statevec_controlledUnitaryLocal(Qureg qureg, const int controlQubit, const int targetQubit,
ComplexMatrix2 u)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
int controlBit;
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag, u) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,stateRealLo,stateImagLo,controlBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// chunkId*chunkSize converts the chunk-local index to a global one
controlBit = extractBit (controlQubit, indexUp+chunkId*chunkSize);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
// state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo]
stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp
+ u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo;
stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp
+ u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo;
// state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo]
stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp
+ u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo;
stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp
+ u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo;
}
}
}
}
/** Rotate a single qubit in the state vector of probability amplitudes, given two complex
* numbers alpha and beta and a subset of the state vector with upper and lower block values
* stored seperately. Only perform the rotation where the control qubit is one.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] controlQubit qubit to determine whether or not to perform a rotation
* @param[in] rot1 rotation angle
* @param[in] rot2 rotation angle
* @param[in] stateVecUp probability amplitudes in upper half of a block
* @param[in] stateVecLo probability amplitudes in lower half of a block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed controlled compact unitary: computes out = rot1*up + conj(rot2)*lo
 * for each amplitude whose GLOBAL index has controlQubit set; amplitudes with
 * the control bit clear are left unmodified in stateVecOut. */
void statevec_controlledCompactUnitaryDistributed (Qureg qureg, const int controlQubit, const int targetQubit,
Complex rot1, Complex rot2,
ComplexArray stateVecUp,
ComplexArray stateVecLo,
ComplexArray stateVecOut)
{
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk;
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
int controlBit;
// unpack complex scalars and array pointers for use inside the OMP region
qreal rot1Real=rot1.real, rot1Imag=rot1.imag;
qreal rot2Real=rot2.real, rot2Imag=rot2.imag;
qreal *stateVecRealUp=stateVecUp.real, *stateVecImagUp=stateVecUp.imag;
qreal *stateVecRealLo=stateVecLo.real, *stateVecImagLo=stateVecLo.imag;
qreal *stateVecRealOut=stateVecOut.real, *stateVecImagOut=stateVecOut.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (stateVecRealUp,stateVecImagUp,stateVecRealLo,stateVecImagLo,stateVecRealOut,stateVecImagOut, \
rot1Real,rot1Imag, rot2Real,rot2Imag) \
private (thisTask,stateRealUp,stateImagUp,stateRealLo,stateImagLo,controlBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
// chunkId*chunkSize converts the chunk-local index to a global one
controlBit = extractBit (controlQubit, thisTask+chunkId*chunkSize);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecRealUp[thisTask];
stateImagUp = stateVecImagUp[thisTask];
stateRealLo = stateVecRealLo[thisTask];
stateImagLo = stateVecImagLo[thisTask];
// state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo]
stateVecRealOut[thisTask] = rot1Real*stateRealUp - rot1Imag*stateImagUp + rot2Real*stateRealLo + rot2Imag*stateImagLo;
stateVecImagOut[thisTask] = rot1Real*stateImagUp + rot1Imag*stateRealUp + rot2Real*stateImagLo - rot2Imag*stateRealLo;
}
}
}
}
/** Rotate a single qubit in the state vector of probability amplitudes, given two complex
* numbers alpha and beta and a subset of the state vector with upper and lower block values
* stored seperately. Only perform the rotation where the control qubit is one.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] controlQubit qubit to determine whether or not to perform a rotation
* @param[in] rot1 rotation angle
* @param[in] rot2 rotation angle
* @param[in] stateVecUp probability amplitudes in upper half of a block
* @param[in] stateVecLo probability amplitudes in lower half of a block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed controlled unitary: computes out = rot1*up + rot2*lo for each
 * amplitude whose GLOBAL index has controlQubit set; amplitudes with the
 * control bit clear are left unmodified in stateVecOut. */
void statevec_controlledUnitaryDistributed (Qureg qureg, const int controlQubit, const int targetQubit,
Complex rot1, Complex rot2,
ComplexArray stateVecUp,
ComplexArray stateVecLo,
ComplexArray stateVecOut)
{
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk;
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
int controlBit;
// unpack complex scalars and array pointers for use inside the OMP region
qreal rot1Real=rot1.real, rot1Imag=rot1.imag;
qreal rot2Real=rot2.real, rot2Imag=rot2.imag;
qreal *stateVecRealUp=stateVecUp.real, *stateVecImagUp=stateVecUp.imag;
qreal *stateVecRealLo=stateVecLo.real, *stateVecImagLo=stateVecLo.imag;
qreal *stateVecRealOut=stateVecOut.real, *stateVecImagOut=stateVecOut.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (stateVecRealUp,stateVecImagUp,stateVecRealLo,stateVecImagLo,stateVecRealOut,stateVecImagOut, \
rot1Real,rot1Imag, rot2Real,rot2Imag) \
private (thisTask,stateRealUp,stateImagUp,stateRealLo,stateImagLo,controlBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
// chunkId*chunkSize converts the chunk-local index to a global one
controlBit = extractBit (controlQubit, thisTask+chunkId*chunkSize);
if (controlBit){
// store current state vector values in temp variables
stateRealUp = stateVecRealUp[thisTask];
stateImagUp = stateVecImagUp[thisTask];
stateRealLo = stateVecRealLo[thisTask];
stateImagLo = stateVecImagLo[thisTask];
// out = rot1*up + rot2*lo, expanded into real/imag parts
stateVecRealOut[thisTask] = rot1Real*stateRealUp - rot1Imag*stateImagUp
+ rot2Real*stateRealLo - rot2Imag*stateImagLo;
stateVecImagOut[thisTask] = rot1Real*stateImagUp + rot1Imag*stateRealUp
+ rot2Real*stateImagLo + rot2Imag*stateRealLo;
}
}
}
}
/** Apply a unitary operation to a single qubit in the state vector of probability amplitudes, given
* a subset of the state vector with upper and lower block values
stored seperately. Only perform the rotation where all the control qubits are 1.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] mask bit mask indicating which qubits must all be 1 for the rotation to be applied
* @param[in] rot1 rotation angle
* @param[in] rot2 rotation angle
* @param[in] stateVecUp probability amplitudes in upper half of a block
* @param[in] stateVecLo probability amplitudes in lower half of a block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed multi-controlled unitary: computes out = rot1*up + rot2*lo for
 * each amplitude whose GLOBAL index has every bit of `mask` set (all control
 * qubits are 1); other amplitudes are left unmodified in stateVecOut. */
void statevec_multiControlledUnitaryDistributed (Qureg qureg,
const int targetQubit,
long long int mask,
Complex rot1, Complex rot2,
ComplexArray stateVecUp,
ComplexArray stateVecLo,
ComplexArray stateVecOut)
{
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk;
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
// unpack complex scalars and array pointers for use inside the OMP region
qreal rot1Real=rot1.real, rot1Imag=rot1.imag;
qreal rot2Real=rot2.real, rot2Imag=rot2.imag;
qreal *stateVecRealUp=stateVecUp.real, *stateVecImagUp=stateVecUp.imag;
qreal *stateVecRealLo=stateVecLo.real, *stateVecImagLo=stateVecLo.imag;
qreal *stateVecRealOut=stateVecOut.real, *stateVecImagOut=stateVecOut.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (stateVecRealUp,stateVecImagUp,stateVecRealLo,stateVecImagLo,stateVecRealOut,stateVecImagOut, \
rot1Real,rot1Imag, rot2Real,rot2Imag, mask) \
private (thisTask,stateRealUp,stateImagUp,stateRealLo,stateImagLo)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
// all mask bits must be set in the GLOBAL index
if (mask == (mask & (thisTask+chunkId*chunkSize)) ){
// store current state vector values in temp variables
stateRealUp = stateVecRealUp[thisTask];
stateImagUp = stateVecImagUp[thisTask];
stateRealLo = stateVecRealLo[thisTask];
stateImagLo = stateVecImagLo[thisTask];
// out = rot1*up + rot2*lo, expanded into real/imag parts
stateVecRealOut[thisTask] = rot1Real*stateRealUp - rot1Imag*stateImagUp
+ rot2Real*stateRealLo - rot2Imag*stateImagLo;
stateVecImagOut[thisTask] = rot1Real*stateImagUp + rot1Imag*stateRealUp
+ rot2Real*stateImagLo + rot2Imag*stateRealLo;
}
}
}
}
/** Apply Pauli-X (NOT) to targetQubit on the local chunk: swaps the amplitudes
 * of every pair (indexUp, indexLo) that differ only in targetQubit's bit. */
void statevec_pauliXLocal(Qureg qureg, const int targetQubit)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateImagUp;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// swap the pair via temporaries
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
}
/** Rotate a single qubit by {{0,1},{1,0}}.
* Operate on a subset of the state vector with upper and lower block values
* stored seperately. This rotation is just swapping upper and lower values, and
* stateVecIn must already be the correct section for this chunk
*
* @remarks Qubits are zero-based and the first qubit is the rightmost
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] stateVecIn probability amplitudes in lower or upper half of a block depending on chunkId
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed Pauli-X on targetQubit: after the communication layer has paired
 * this chunk with its partner block, the operation reduces to a straight copy
 * of the incoming amplitudes into the output section. */
void statevec_pauliXDistributed (Qureg qureg, const int targetQubit,
ComplexArray stateVecIn,
ComplexArray stateVecOut)
{
const long long int numAmps = qureg.numAmpsPerChunk;
qreal *inRe = stateVecIn.real, *inIm = stateVecIn.imag;
qreal *outRe = stateVecOut.real, *outIm = stateVecOut.imag;
long long int amp;
# ifdef _OPENMP
# pragma omp parallel \
shared (inRe,inIm, outRe,outIm) \
private (amp)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (amp=0; amp<numAmps; amp++) {
outRe[amp] = inRe[amp];
outIm[amp] = inIm[amp];
}
}
}
/** Apply controlled-NOT on the local chunk: swaps the amplitudes of each
 * (indexUp, indexLo) pair differing in targetQubit's bit, but only where the
 * control bit of the GLOBAL amplitude index is 1. */
void statevec_controlledNotLocal(Qureg qureg, const int controlQubit, const int targetQubit)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateImagUp;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
int controlBit;
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,controlBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// chunkId*chunkSize converts the chunk-local index to a global one
controlBit = extractBit(controlQubit, indexUp+chunkId*chunkSize);
if (controlBit){
// swap the pair via temporaries
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = stateVecReal[indexLo];
stateVecImag[indexUp] = stateVecImag[indexLo];
stateVecReal[indexLo] = stateRealUp;
stateVecImag[indexLo] = stateImagUp;
}
}
}
}
/** Rotate a single qubit by {{0,1},{1,0}}.
* Operate on a subset of the state vector with upper and lower block values
* stored seperately. This rotation is just swapping upper and lower values, and
* stateVecIn must already be the correct section for this chunk. Only perform the rotation
* for elements where controlQubit is one.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] stateVecIn probability amplitudes in lower or upper half of a block depending on chunkId
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed controlled-NOT: copies incoming amplitudes to the output section
 * only where the control bit of the GLOBAL amplitude index is 1; other output
 * amplitudes are left unmodified. */
void statevec_controlledNotDistributed (Qureg qureg, const int controlQubit, const int targetQubit,
ComplexArray stateVecIn,
ComplexArray stateVecOut)
{
const long long int numAmps = qureg.numAmpsPerChunk;
const long long int chunkSize = qureg.numAmpsPerChunk;
const long long int chunkId = qureg.chunkId;
qreal *inRe = stateVecIn.real, *inIm = stateVecIn.imag;
qreal *outRe = stateVecOut.real, *outIm = stateVecOut.imag;
long long int amp;
int ctrl;
# ifdef _OPENMP
# pragma omp parallel \
shared (inRe,inIm, outRe,outIm) \
private (amp,ctrl)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (amp=0; amp<numAmps; amp++) {
// chunkId*chunkSize converts the chunk-local index to a global one
ctrl = extractBit (controlQubit, amp+chunkId*chunkSize);
if (ctrl){
outRe[amp] = inRe[amp];
outIm[amp] = inIm[amp];
}
}
}
}
/** Apply Pauli-Y (or its conjugate, selected by conjFac = +-1) to targetQubit
 * on the local chunk. For each pair: new up = conjFac * (-i) * lo and
 * new lo = conjFac * (+i) * up, expanded into real/imag assignments below. */
void statevec_pauliYLocal(Qureg qureg, const int targetQubit, const int conjFac)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateImagUp;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// only the upper amplitude needs saving; the lower is read before overwrite
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
}
/** Rotate a single qubit by +-{{0,-i},{i,0}}.
* Operate on a subset of the state vector with upper and lower block values
* stored seperately. This rotation is just swapping upper and lower values, and
* stateVecIn must already be the correct section for this chunk
*
* @remarks Qubits are zero-based and the first qubit is the rightmost
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] stateVecIn probability amplitudes in lower or upper half of a block depending on chunkId
* @param[in] updateUpper flag, 1: updating upper values, 0: updating lower values in block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed Pauli-Y (or conjugate, per conjFac = +-1): when updating the
 * upper half of each block the output is conjFac * (-i) * in; when updating
 * the lower half it is conjFac * (+i) * in. */
void statevec_pauliYDistributed(Qureg qureg, const int targetQubit,
ComplexArray stateVecIn,
ComplexArray stateVecOut,
int updateUpper, const int conjFac)
{
const long long int numAmps = qureg.numAmpsPerChunk;
qreal *inRe = stateVecIn.real, *inIm = stateVecIn.imag;
qreal *outRe = stateVecOut.real, *outIm = stateVecOut.imag;
// updateUpper: out = -i*in (re from +imag, im from -real); else out = +i*in
const int realSign = updateUpper ? 1 : -1;
const int imagSign = updateUpper ? -1 : 1;
long long int amp;
# ifdef _OPENMP
# pragma omp parallel \
shared (inRe,inIm, outRe,outIm) \
private (amp)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (amp=0; amp<numAmps; amp++) {
outRe[amp] = conjFac * realSign * inIm[amp];
outIm[amp] = conjFac * imagSign * inRe[amp];
}
}
}
/** Apply controlled Pauli-Y (or its conjugate, selected by conjFac = +-1) to
 * targetQubit on the local chunk, only where the control bit of the GLOBAL
 * amplitude index is 1: new up = conjFac*(-i)*lo, new lo = conjFac*(+i)*up. */
void statevec_controlledPauliYLocal(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateImagUp;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
const long long int chunkSize=qureg.numAmpsPerChunk;
const long long int chunkId=qureg.chunkId;
int controlBit;
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,controlBit)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// chunkId*chunkSize converts the chunk-local index to a global one
controlBit = extractBit(controlQubit, indexUp+chunkId*chunkSize);
if (controlBit){
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
// update under +-{{0, -i}, {i, 0}}
stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
stateVecReal[indexLo] = conjFac * -stateImagUp;
stateVecImag[indexLo] = conjFac * stateRealUp;
}
}
}
}
/** Distributed controlled Pauli-Y (or conjugate, per conjFac = +-1): writes
 * out = conjFac * (-i) * in wherever the control bit of the GLOBAL amplitude
 * index is 1; other output amplitudes are left unmodified. */
void statevec_controlledPauliYDistributed (Qureg qureg, const int controlQubit, const int targetQubit,
ComplexArray stateVecIn,
ComplexArray stateVecOut, const int conjFac)
{
const long long int numAmps = qureg.numAmpsPerChunk;
const long long int chunkSize = qureg.numAmpsPerChunk;
const long long int chunkId = qureg.chunkId;
qreal *inRe = stateVecIn.real, *inIm = stateVecIn.imag;
qreal *outRe = stateVecOut.real, *outIm = stateVecOut.imag;
long long int amp;
int ctrl;
# ifdef _OPENMP
# pragma omp parallel \
shared (inRe,inIm, outRe,outIm) \
private (amp,ctrl)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (amp=0; amp<numAmps; amp++) {
// chunkId*chunkSize converts the chunk-local index to a global one
ctrl = extractBit (controlQubit, amp+chunkId*chunkSize);
if (ctrl){
outRe[amp] = conjFac * inIm[amp];
outIm[amp] = conjFac * -inRe[amp];
}
}
}
}
/** Apply the Hadamard gate to targetQubit on the local chunk: for each pair,
 * new up = (up + lo)/sqrt(2) and new lo = (up - lo)/sqrt(2). */
void statevec_hadamardLocal(Qureg qureg, const int targetQubit)
{
long long int sizeBlock, sizeHalfBlock;
long long int thisBlock, // current block
indexUp,indexLo; // current index and corresponding index in lower half block
qreal stateRealUp,stateRealLo,stateImagUp,stateImagLo;
long long int thisTask;
const long long int numTasks=qureg.numAmpsPerChunk>>1; // one task per amplitude pair
// set dimensions
sizeHalfBlock = 1LL << targetQubit; // stride between paired amplitudes
sizeBlock = 2LL * sizeHalfBlock;
// Can't use qureg.stateVec as a private OMP var
qreal *stateVecReal = qureg.stateVec.real;
qreal *stateVecImag = qureg.stateVec.imag;
qreal recRoot2 = 1.0/sqrt(2);
# ifdef _OPENMP
# pragma omp parallel \
shared (sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag, recRoot2) \
private (thisTask,thisBlock ,indexUp,indexLo, stateRealUp,stateImagUp,stateRealLo,stateImagLo)
# endif
{
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
for (thisTask=0; thisTask<numTasks; thisTask++) {
thisBlock = thisTask / sizeHalfBlock;
indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
indexLo = indexUp + sizeHalfBlock;
// save both amplitudes before overwriting either
stateRealUp = stateVecReal[indexUp];
stateImagUp = stateVecImag[indexUp];
stateRealLo = stateVecReal[indexLo];
stateImagLo = stateVecImag[indexLo];
stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
}
}
}
/** Rotate a single qubit by {{1,1},{1,-1}}/sqrt2.
* Operate on a subset of the state vector with upper and lower block values
* stored separately. stateVecUp and stateVecLo must already be the correct
* sections for this chunk.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] targetQubit qubit to rotate
* @param[in] stateVecUp probability amplitudes in upper half of a block
* @param[in] stateVecLo probability amplitudes in lower half of a block
* @param[in] updateUpper flag, 1: updating upper values, 0: updating lower values in block
* @param[out] stateVecOut array section to update (will correspond to either the lower or upper half of a block)
*/
/** Distributed Hadamard update: for every local amplitude, combine the
 * corresponding upper- and lower-block amplitudes (held in separate arrays)
 * and write the result to stateVecOut. Upper-half targets take the sum,
 * lower-half targets take the difference, each scaled by 1/sqrt(2).
 */
void statevec_hadamardDistributed(Qureg qureg, const int targetQubit,
        ComplexArray stateVecUp,
        ComplexArray stateVecLo,
        ComplexArray stateVecOut,
        int updateUpper)
{
    qreal reUp, imUp, reLo, imLo;
    long long int ampInd;
    const long long int numAmps = qureg.numAmpsPerChunk;
    // +1 when writing the upper half of a block, -1 when writing the lower half
    int sign = updateUpper ? 1 : -1;
    qreal recRoot2 = 1.0/sqrt(2);
    qreal *reUpVec = stateVecUp.real,   *imUpVec = stateVecUp.imag;
    qreal *reLoVec = stateVecLo.real,   *imLoVec = stateVecLo.imag;
    qreal *reOutVec = stateVecOut.real, *imOutVec = stateVecOut.imag;
# ifdef _OPENMP
# pragma omp parallel \
    shared  (reUpVec,imUpVec,reLoVec,imLoVec,reOutVec,imOutVec, recRoot2, sign) \
    private (ampInd, reUp,imUp,reLo,imLo)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (ampInd = 0; ampInd < numAmps; ampInd++) {
            // snapshot the inputs before writing the (distinct) output array
            reUp = reUpVec[ampInd];
            imUp = imUpVec[ampInd];
            reLo = reLoVec[ampInd];
            imLo = imLoVec[ampInd];
            reOutVec[ampInd] = recRoot2*(reUp + sign*reLo);
            imOutVec[ampInd] = recRoot2*(imUp + sign*imLo);
        }
    }
}
/** Multiply every amplitude whose basis state has targetQubit==1 by the
 * complex scalar 'term' (supplied via its real and imaginary parts);
 * amplitudes with targetQubit==0 are left untouched.
 */
void statevec_phaseShiftByTerm (Qureg qureg, const int targetQubit, Complex term)
{
    const long long int chunkSize = qureg.numAmpsPerChunk;
    const long long int chunkId = qureg.chunkId;
    long long int numAmps = qureg.numAmpsPerChunk;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
    const qreal cosAngle = term.real;
    const qreal sinAngle = term.imag;
    long long int amp;
    int bitVal;
    qreal oldRe, oldIm;
# ifdef _OPENMP
# pragma omp parallel for \
    shared  (numAmps, reVec,imVec) \
    private (amp,bitVal,oldRe,oldIm) \
    schedule (static)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        // the global index (chunk offset + local index) decides the qubit's bit value
        bitVal = extractBit (targetQubit, amp+chunkId*chunkSize);
        if (bitVal) {
            oldRe = reVec[amp];
            oldIm = imVec[amp];
            // complex multiply (oldRe + i oldIm) * (cosAngle + i sinAngle)
            reVec[amp] = cosAngle*oldRe - sinAngle*oldIm;
            imVec[amp] = sinAngle*oldRe + cosAngle*oldIm;
        }
    }
}
/** Multiply by e^{i angle} every amplitude whose basis state has both
 * idQubit1 and idQubit2 equal to 1.
 */
void statevec_controlledPhaseShift (Qureg qureg, const int idQubit1, const int idQubit2, qreal angle)
{
    const long long int chunkSize = qureg.numAmpsPerChunk;
    const long long int chunkId = qureg.chunkId;
    long long int numAmps = qureg.numAmpsPerChunk;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
    const qreal cosAngle = cos(angle);
    const qreal sinAngle = sin(angle);
    long long int amp;
    int b1, b2;
    qreal oldRe, oldIm;
# ifdef _OPENMP
# pragma omp parallel for \
    shared  (numAmps, reVec,imVec) \
    private (amp,b1,b2,oldRe,oldIm) \
    schedule (static)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        // both control bits are read from the global (cross-chunk) index
        b1 = extractBit (idQubit1, amp+chunkId*chunkSize);
        b2 = extractBit (idQubit2, amp+chunkId*chunkSize);
        if (b1 && b2) {
            oldRe = reVec[amp];
            oldIm = imVec[amp];
            // complex multiply by (cosAngle + i sinAngle)
            reVec[amp] = cosAngle*oldRe - sinAngle*oldIm;
            imVec[amp] = sinAngle*oldRe + cosAngle*oldIm;
        }
    }
}
/** Multiply by e^{i angle} every amplitude whose basis state has ALL of the
 * listed control qubits equal to 1.
 */
void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
{
    const long long int chunkSize = qureg.numAmpsPerChunk;
    const long long int chunkId = qureg.chunkId;
    const qreal cosAngle = cos(angle);
    const qreal sinAngle = sin(angle);
    // bitmask with a 1 in each control-qubit position
    long long int controlMask = 0;
    for (int q = 0; q < numControlQubits; q++)
        controlMask |= (1LL << controlQubits[q]);
    long long int numAmps = qureg.numAmpsPerChunk;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
    long long int amp;
    qreal oldRe, oldIm;
# ifdef _OPENMP
# pragma omp parallel \
    shared  (numAmps, reVec, imVec, controlMask) \
    private (amp, oldRe, oldIm)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (amp = 0; amp < numAmps; amp++) {
            // phase applies only when every control bit is set in the global index
            if (controlMask == (controlMask & (amp+chunkId*chunkSize))) {
                oldRe = reVec[amp];
                oldIm = imVec[amp];
                reVec[amp] = cosAngle*oldRe - sinAngle*oldIm;
                imVec[amp] = sinAngle*oldRe + cosAngle*oldIm;
            }
        }
    }
}
/** Sum this chunk's density-matrix diagonal elements for which measureQubit
 * is 0, i.e. this chunk's partial contribution to the probability of
 * measuring outcome 0.
 * In the flattened matrix, diagonal elements occur every (densityDim + 1)
 * amplitudes; this routine locates the first diagonal element stored locally,
 * counts how many diagonals the chunk holds, and sums their real parts.
 *
 * @param[in] qureg density matrix (this node's chunk only)
 * @param[in] measureQubit qubit whose zero-outcome probability is accumulated
 * @return this chunk's partial probability of measureQubit being zero
 */
qreal densmatr_findProbabilityOfZeroLocal(Qureg qureg, const int measureQubit) {
    // computes first local index containing a diagonal element
    long long int localNumAmps = qureg.numAmpsPerChunk;
    long long int densityDim = (1LL << qureg.numQubitsRepresented);
    long long int diagSpacing = 1LL + densityDim;  // stride between successive diagonal elements
    long long int maxNumDiagsPerChunk = 1 + localNumAmps / diagSpacing;
    // number of diagonals stored in all chunks preceding this one
    long long int numPrevDiags = (qureg.chunkId>0)? 1+(qureg.chunkId*localNumAmps)/diagSpacing : 0;
    long long int globalIndNextDiag = diagSpacing * numPrevDiags;
    long long int localIndNextDiag = globalIndNextDiag % localNumAmps;
    // computes how many diagonals are contained in this chunk
    long long int numDiagsInThisChunk = maxNumDiagsPerChunk;
    // drop the last candidate if it would fall past the end of this chunk
    if (localIndNextDiag + (numDiagsInThisChunk-1)*diagSpacing >= localNumAmps)
        numDiagsInThisChunk -= 1;
    long long int visitedDiags; // number of visited diagonals in this chunk so far
    long long int basisStateInd; // current diagonal index being considered
    long long int index; // index in the local chunk
    qreal zeroProb = 0;
    qreal *stateVecReal = qureg.stateVec.real;
# ifdef _OPENMP
# pragma omp parallel \
    shared (localIndNextDiag, numPrevDiags, diagSpacing, stateVecReal, numDiagsInThisChunk) \
    private (visitedDiags, basisStateInd, index) \
    reduction ( +:zeroProb )
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        // sums the diagonal elems of the density matrix where measureQubit=0
        for (visitedDiags = 0; visitedDiags < numDiagsInThisChunk; visitedDiags++) {
            basisStateInd = numPrevDiags + visitedDiags;  // global diagonal (basis state) index
            index = localIndNextDiag + diagSpacing * visitedDiags;  // local array offset of this diagonal
            if (extractBit(measureQubit, basisStateInd) == 0)
                zeroProb += stateVecReal[index]; // assume imag[diagonals] ~ 0
        }
    }
    return zeroProb;
}
/** Measure the total probability of a specified qubit being in the zero state across all amplitudes in this chunk.
* Size of regions to skip is less than the size of one chunk.
*
* @param[in] qureg object representing the set of qubits
* @param[in] measureQubit qubit to measure
* @return probability of qubit measureQubit being zero
*/
/** Sum |amplitude|^2 over the half-blocks of this chunk in which
 * measureQubit is 0. The state vector splits into blocks of 2^(q+1)
 * amplitudes: the first half of each block has measureQubit==0, the
 * second half has measureQubit==1.
 *
 * @param[in] qureg object representing the set of qubits
 * @param[in] measureQubit qubit to measure
 * @return probability of qubit measureQubit being zero
 */
qreal statevec_findProbabilityOfZeroLocal (Qureg qureg,
        const int measureQubit)
{
    long long int halfBlock = 1LL << measureQubit;  // amps to sum, then amps to skip
    long long int blockSize = 2LL * halfBlock;      // zero-half followed by one-half
    long long int numTasks = qureg.numAmpsPerChunk >> 1;
    long long int task, blockInd, ampInd;
    qreal zeroProb = 0.0;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
    shared  (numTasks,blockSize,halfBlock, reVec,imVec) \
    private (task,blockInd,ampInd) \
    reduction ( +:zeroProb )
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (task = 0; task < numTasks; task++) {
            // map the task number onto an index in the zero-half of its block
            blockInd = task / halfBlock;
            ampInd = blockInd*blockSize + task%halfBlock;
            zeroProb += reVec[ampInd]*reVec[ampInd]
                      + imVec[ampInd]*imVec[ampInd];
        }
    }
    return zeroProb;
}
/** Measure the probability of a specified qubit being in the zero state across all amplitudes held in this chunk.
* Size of regions to skip is a multiple of chunkSize.
* The results are communicated and aggregated by the caller
*
* @param[in] qureg object representing the set of qubits
* @param[in] measureQubit qubit to measure
* @return probability of qubit measureQubit being zero
*/
/** Sum |amplitude|^2 over every amplitude held in this chunk; used when the
 * skip regions are a multiple of the chunk size, so the whole chunk
 * contributes. The caller communicates and aggregates the partial results.
 *
 * @param[in] qureg object representing the set of qubits
 * @param[in] measureQubit qubit to measure
 * @return probability of qubit measureQubit being zero
 */
qreal statevec_findProbabilityOfZeroDistributed (Qureg qureg,
        const int measureQubit)
{
    long long int numAmps = qureg.numAmpsPerChunk;
    long long int amp;
    qreal zeroProb = 0.0;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
    shared  (numAmps,reVec,imVec) \
    private (amp) \
    reduction ( +:zeroProb )
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (amp = 0; amp < numAmps; amp++) {
            zeroProb += reVec[amp]*reVec[amp]
                      + imVec[amp]*imVec[amp];
        }
    }
    return zeroProb;
}
/** Negate the amplitudes of basis states in which both idQubit1 and
 * idQubit2 are 1; all other amplitudes are untouched.
 */
void statevec_controlledPhaseFlip (Qureg qureg, const int idQubit1, const int idQubit2)
{
    const long long int chunkSize = qureg.numAmpsPerChunk;
    const long long int chunkId = qureg.chunkId;
    long long int numAmps = qureg.numAmpsPerChunk;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
    long long int amp;
    int b1, b2;
# ifdef _OPENMP
# pragma omp parallel for \
    shared  (numAmps, reVec,imVec) \
    private (amp,b1,b2) \
    schedule (static)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        // qubit bit values are taken from the global (cross-chunk) index
        b1 = extractBit (idQubit1, amp+chunkId*chunkSize);
        b2 = extractBit (idQubit2, amp+chunkId*chunkSize);
        if (b1 && b2) {
            reVec[amp] = - reVec[amp];
            imVec[amp] = - imVec[amp];
        }
    }
}
/** Negate the amplitudes of basis states in which ALL of the listed control
 * qubits are 1; all other amplitudes are untouched.
 */
void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
{
    const long long int chunkSize = qureg.numAmpsPerChunk;
    const long long int chunkId = qureg.chunkId;
    // bitmask with a 1 in each control-qubit position
    long long int controlMask = 0;
    for (int q = 0; q < numControlQubits; q++)
        controlMask |= (1LL << controlQubits[q]);
    long long int numAmps = qureg.numAmpsPerChunk;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
    long long int amp;
# ifdef _OPENMP
# pragma omp parallel \
    shared  (numAmps, reVec,imVec, controlMask) \
    private (amp)
# endif
    {
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
        for (amp = 0; amp < numAmps; amp++) {
            // flip only when every control bit is set in the global index
            if (controlMask == (controlMask & (amp+chunkId*chunkSize))) {
                reVec[amp] = - reVec[amp];
                imVec[amp] = - imVec[amp];
            }
        }
    }
}
/** Update the state vector to be consistent with measuring measureQubit=0 if outcome=0 and measureQubit=1
* if outcome=1.
* Performs an irreversible change to the state vector: it updates the vector according
* to the event that an outcome have been measured on the qubit indicated by measureQubit (where
* this label starts from 0, of course). It achieves this by setting all inconsistent
* amplitudes to 0 and
* then renormalising based on the total probability of measuring measureQubit=0 or 1 according to the
* value of outcome.
* In the local version, one or more blocks (with measureQubit=0 in the first half of the block and
* measureQubit=1 in the second half of the block) fit entirely into one chunk.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] measureQubit qubit to measure
* @param[in] totalProbability probability of qubit measureQubit being either zero or one
* @param[in] outcome to measure the probability of and set the state to -- either zero or one
*/
/** Collapse the local chunk after measuring 'outcome' on measureQubit:
 * amplitudes consistent with the outcome are renormalised by
 * 1/sqrt(totalProbability), inconsistent ones are zeroed.
 * Each block of 2^(measureQubit+1) amplitudes holds the measureQubit==0 amps
 * in its first half and the measureQubit==1 amps in its second half; each
 * task processes one (zero-half, one-half) pair of amplitudes.
 *
 * @param[in,out] qureg object representing the set of qubits
 * @param[in] measureQubit qubit that was measured
 * @param[in] outcome measured outcome, 0 or 1
 * @param[in] totalProbability probability of the measured outcome (must be > 0)
 */
void statevec_collapseToKnownProbOutcomeLocal(Qureg qureg, int measureQubit, int outcome, qreal totalProbability)
{
    // ----- sizes
    long long int sizeBlock, // size of blocks
                  sizeHalfBlock; // size of blocks halved
    // ----- indices
    long long int thisBlock, // current block
                  index; // current index for first half block
    // ----- measured probability
    qreal renorm; // probability (returned) value
    // ----- temp variables
    long long int thisTask; // task based approach for expose loop with small granularity
    // (good for shared memory parallelism)
    long long int numTasks=qureg.numAmpsPerChunk>>1;
    // ---------------------------------------------------------------- //
    // dimensions //
    // ---------------------------------------------------------------- //
    sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum,
    // and then the number to skip
    sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries)
    renorm=1/sqrt(totalProbability);
    qreal *stateVecReal = qureg.stateVec.real;
    qreal *stateVecImag = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel \
    shared (numTasks,sizeBlock,sizeHalfBlock, stateVecReal,stateVecImag,renorm,outcome) \
    private (thisTask,thisBlock,index)
# endif
    {
        // the branch is inside the parallel region but outside the omp-for:
        // every thread evaluates the same 'outcome' and takes the same branch
        if (outcome==0){
            // measure qubit is 0: renormalise the zero-half, zero the one-half
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
            for (thisTask=0; thisTask<numTasks; thisTask++) {
                thisBlock = thisTask / sizeHalfBlock;
                index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
                stateVecReal[index]=stateVecReal[index]*renorm;
                stateVecImag[index]=stateVecImag[index]*renorm;
                stateVecReal[index+sizeHalfBlock]=0;
                stateVecImag[index+sizeHalfBlock]=0;
            }
        } else {
            // measure qubit is 1: zero the zero-half, renormalise the one-half
# ifdef _OPENMP
# pragma omp for schedule (static)
# endif
            for (thisTask=0; thisTask<numTasks; thisTask++) {
                thisBlock = thisTask / sizeHalfBlock;
                index = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
                stateVecReal[index]=0;
                stateVecImag[index]=0;
                stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm;
                stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm;
            }
        }
    }
}
/** Renormalise parts of the state vector where measureQubit=0 or 1, based on the total probability of that qubit being
* in state 0 or 1.
* Measure in Zero performs an irreversible change to the state vector: it updates the vector according
* to the event that the value 'outcome' has been measured on the qubit indicated by measureQubit (where
* this label starts from 0, of course). It achieves this by setting all inconsistent amplitudes to 0 and
* then renormalising based on the total probability of measuring measureQubit=0 if outcome=0 and
* measureQubit=1 if outcome=1.
* In the distributed version, one block (with measureQubit=0 in the first half of the block and
* measureQubit=1 in the second half of the block) is spread over multiple chunks, meaning that each chunks performs
* only renormalisation or only setting amplitudes to 0. This function handles the renormalisation.
*
* @param[in,out] qureg object representing the set of qubits
* @param[in] measureQubit qubit to measure
* @param[in] totalProbability probability of qubit measureQubit being zero
*/
/** Renormalise every amplitude in this chunk by 1/sqrt(totalProbability).
 * Used by the distributed collapse when this whole chunk is consistent with
 * the measured outcome (the zeroing of other chunks is handled elsewhere).
 */
void statevec_collapseToKnownProbOutcomeDistributedRenorm (Qureg qureg, const int measureQubit, const qreal totalProbability)
{
    const qreal renorm = 1/sqrt(totalProbability);
    long long int numAmps = qureg.numAmpsPerChunk;
    long long int amp;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel for \
    shared  (numAmps,reVec,imVec) \
    private (amp) \
    schedule (static)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        reVec[amp] = reVec[amp]*renorm;
        imVec[amp] = imVec[amp]*renorm;
    }
}
/** Set all amplitudes in one chunk to 0.
 * Measure in Zero performs an irreversible change to the state vector: it updates the vector according
 * to the event that a zero have been measured on the qubit indicated by measureQubit (where
 * this label starts from 0, of course). It achieves this by setting all inconsistent amplitudes to 0 and
 * then renormalising based on the total probability of measuring measureQubit=0 or 1.
 * In the distributed version, one block (with measureQubit=0 in the first half of the block and
 * measureQubit=1 in the second half of the block) is spread over multiple chunks, meaning that each chunk performs
 * only renormalisation or only setting amplitudes to 0. This function handles setting amplitudes to 0.
 *
 * @param[in,out] qureg object representing the set of qubits; this chunk's amplitudes are all zeroed
 */
/** Zero every amplitude in this chunk. Used by the distributed collapse when
 * the whole chunk is inconsistent with the measured outcome.
 */
void statevec_collapseToOutcomeDistributedSetZero(Qureg qureg)
{
    long long int numAmps = qureg.numAmpsPerChunk;
    long long int amp;
    qreal *reVec = qureg.stateVec.real;
    qreal *imVec = qureg.stateVec.imag;
# ifdef _OPENMP
# pragma omp parallel for \
    shared  (numAmps,reVec,imVec) \
    private (amp) \
    schedule (static)
# endif
    for (amp = 0; amp < numAmps; amp++) {
        reVec[amp] = 0;
        imVec[amp] = 0;
    }
}
|
ProgressBar.h | /**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/config.h"
#include "saiga/core/math/imath.h"
#include "saiga/core/time/all.h"
#include "saiga/core/util/Thread/SpinLock.h"
#include "saiga/core/util/Thread/threadName.h"
#include "saiga/core/util/assert.h"
#include "saiga/core/util/tostring.h"
#include <atomic>
#include <iostream>
#include <mutex>
#include <string>
#include <condition_variable>
namespace Saiga
{
/**
* A synchronized progress bar for console output.
* You must not write to the given stream while the progress bar is active.
*
* Usage Parallel Image loading:
*
* ProgressBar loadingBar(std::cout, "Loading " + to_string(N) + " images ", N);
* #pragma omp parallel for
* for (int i = 0; i < N; ++i)
* {
* images[i].load("...");
* loadingBar.addProgress(1);
* }
*
*/
struct ProgressBar
{
    // Prints the bar once immediately; when end > 0, also starts a background
    // thread that reprints it every update_time_ms until completion or Quit().
    //
    // strm                : output stream; callers must not write to it while the bar is active
    // header              : text printed before the bar
    // end                 : total element count (asserted >= 0); end == 0 disables the printer thread
    // length              : bar width in characters
    // show_remaining_time : also print an ETA estimate
    // update_time_ms      : reprint interval for the background thread
    // element_name        : unit label for the elements-per-second display
    ProgressBar(std::ostream& strm, const std::string header, int end, int length = 30,
                bool show_remaining_time = false, int update_time_ms = 100, std::string element_name = "e")
        : strm(strm),
          prefix(header),
          end(end),
          length(length),
          show_remaining_time(show_remaining_time),
          update_time_ms(update_time_ms),
          element_name(element_name)
    {
        SAIGA_ASSERT(end >= 0);
        print();
        if (end > 0)
        {
            run();
        }
        timer.start();
    }
    ~ProgressBar() { Quit(); }
    // Thread-safe: 'current' is atomic, so worker threads may call this concurrently.
    void addProgress(int i) { current += i; }
    // Replaces the text shown after the bar; guarded by 'lock' because the
    // printer thread reads 'postfix' while printing.
    void SetPostfix(const std::string& str)
    {
        std::unique_lock l(lock);
        postfix = str;
    }
    // Stops and joins the printer thread; safe to call repeatedly (also called
    // from the destructor).
    void Quit()
    {
        running = false;
        cv.notify_one();
        if (st.joinable())
        {
            st.join();
        }
    }
private:
    TimerBase timer;                   // measures elapsed time since construction
    std::ostream& strm;                // destination stream for all output
    ScopedThread st;                   // background printer thread (joined in Quit)
    std::string prefix;                // header text printed before the bar
    std::string postfix;               // trailing text; protected by 'lock'
    std::atomic_bool running = true;   // cleared by Quit() to stop the printer loop
    std::atomic_int current = 0;       // progress counter, incremented by addProgress
    std::mutex lock;                   // guards 'postfix' and backs 'cv'
    std::condition_variable cv;        // lets Quit() interrupt the printer's sleep
    int end;
    int length;
    bool show_remaining_time;
    int update_time_ms;
    std::string element_name;
    // Launches the printer thread: reprints the bar every update_time_ms until
    // 'running' is cleared or 'current' reaches 'end', then prints a final
    // state and a newline.
    void run()
    {
        st = ScopedThread(
            [this]()
            {
                while (running && current.load() < end)
                {
                    print();
                    std::unique_lock<std::mutex> l(lock);
                    // wait_for (not sleep) so Quit() can wake us early via cv
                    cv.wait_for(l, std::chrono::milliseconds(update_time_ms));
                }
                print();
                strm << std::endl;
            });
    }
    // Renders one line: "\r<prefix> nn% |###   | cur/end [time<eta] [rate] postfix".
    // Saves and restores the stream's format flags so callers are unaffected.
    void print()
    {
        auto f = strm.flags();
        // SAIGA_ASSERT(current <= end);
        double progress = end == 0 ? 0 : double(current) / end;
        auto time = timer.stop();
        int progress_pro = iRound(progress * 100);
        int barLength = progress * length;
        strm << "\r" << prefix << " ";
        strm << std::setw(3) << progress_pro << "%";
        {
            // bar
            strm << " |";
            for (auto i = 0; i < barLength; ++i)
            {
                strm << "#";
            }
            for (auto i = barLength; i < length; ++i)
            {
                strm << " ";
            }
            strm << "| ";
        }
        {
            // element count
            auto end_str = to_string(end);
            strm << std::setw(end_str.size()) << current << "/" << end << " ";
        }
        {
            // Time
            strm << "[" << DurationToString(time);
            if (show_remaining_time)
            {
                // NOTE(review): divides by 'progress' — when progress == 0 this
                // yields inf/nan in the ETA; confirm whether that is acceptable
                auto remaining_time = time * (1 / progress) - time;
                strm << "<" << DurationToString(remaining_time);
            }
            strm << "] ";
        }
        {
            // performance stats
            // NOTE(review): 's' can be 0 on the very first print, making the
            // rate inf — confirm whether that is acceptable
            double s = std::chrono::duration_cast<std::chrono::duration<double>>(time).count();
            double ele_per_second = current / s;
            strm << "[" << std::setprecision(2) << std::fixed << ele_per_second << " " << element_name << "/s]";
        }
        {
            std::unique_lock l(lock);
            strm << " " << postfix;
        }
        strm << std::flush;
        strm << std::setprecision(6);
        strm.flags(f);
    }
};
} // namespace Saiga
|
deconvolution_packnto1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transposed convolution ("deconvolution") for input packed with `packn`
// fp16 lanes per channel and a pack-1 fp16 output blob. Per-lane products
// are widened to fp32 (vfwmacc) before the final horizontal reduction,
// and the bias/activation are applied in fp32.
static void deconvolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;  // fp16 elements per vector register (VLEN bits / 16)
    const word_type vl = vsetvl_e16m1(packn);
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    const int maxk = kernel_w * kernel_h;
    const float* bias_data_ptr = bias_data;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);
                // weights for output channel p: maxk taps x channels, packn lanes each
                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map output row i back to the input row feeding this tap;
                        // skip taps that fall between strides or outside the input
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;
                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;
                        for (int x = 0; x < kernel_w; x++)
                        {
                            // same mapping for the column coordinate
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;
                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;
                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;
                            int k = y * kernel_w + x;
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            // widening multiply-accumulate: fp16 x fp16 -> fp32 accumulator
                            _sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl);
                        }
                    }
                    kptr += maxk * packn;
                }
#ifdef RVV_SPEC_0_7
                // TODO
                // fallback for the 0.7 spec: spill lanes to memory and reduce scalarly
                std::vector<float> ss(packn);
                vse32_v_f32m2((float*)ss.data(), _sum, vl);
                for (int i = 0; i < packn; i++)
                {
                    sum += ss[i];
                }
#else
                // horizontal-add the fp32 lanes into the scalar sum (bias included)
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
#endif
                sum = activation_ss(sum, activation_type, activation_params);
                outptr[j] = sum;
            }
            outptr += outw;
        }
    }
}
// Same transposed convolution as the fp16s variant above, but with fp16
// arithmetic throughout ("fp16sa"): accumulation, bias and reduction stay
// in fp16 (vfmacc/vfredsum on f16m1) instead of widening to fp32.
static void deconvolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;  // fp16 elements per vector register (VLEN bits / 16)
    const word_type vl = vsetvl_e16m1(packn);
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
    const int maxk = kernel_w * kernel_h;
    const __fp16* bias_data_ptr = bias_data_fp16;
    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __fp16 sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }
                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
                // weights for output channel p: maxk taps x channels, packn lanes each
                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;
                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map output row i back to the input row feeding this tap;
                        // skip taps that fall between strides or outside the input
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;
                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;
                        for (int x = 0; x < kernel_w; x++)
                        {
                            // same mapping for the column coordinate
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;
                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;
                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;
                            int k = y * kernel_w + x;
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            // fp16 multiply-accumulate, no widening
                            _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);
                        }
                    }
                    kptr += maxk * packn;
                }
                // horizontal-add the fp16 lanes into the scalar sum (bias included)
                sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));
                sum = activation_ss(sum, activation_type, activation_params);
                outptr[j] = sum;
            }
            outptr += outw;
        }
    }
}
|
kmp_atomic_cas.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdbool.h>
#include <omp.h>
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void* ident_t;
extern bool
__kmpc_atomic_bool_1_cas(ident_t *loc, int gtid, char *x, char e, char d);
extern bool
__kmpc_atomic_bool_2_cas(ident_t *loc, int gtid, short *x, short e, short d);
extern bool
__kmpc_atomic_bool_4_cas(ident_t *loc, int gtid, int *x, int e, int d);
extern bool
__kmpc_atomic_bool_8_cas(ident_t *loc, int gtid, long long *x, long long e,
long long d);
extern char
__kmpc_atomic_val_1_cas(ident_t *loc, int gtid, char *x, char e, char d);
extern short
__kmpc_atomic_val_2_cas(ident_t *loc, int gtid, short *x, short e, short d);
extern int
__kmpc_atomic_val_4_cas(ident_t *loc, int gtid, int *x, int e, int d);
extern long long
__kmpc_atomic_val_8_cas(ident_t *loc, int gtid, long long *x, long long e,
long long d);
#ifdef __cplusplus
}
#endif
// Exercises the libomp compare-and-swap entry points emitted for
// '#pragma omp atomic compare': the bool-returning flavour (did the swap
// happen?) and the value-returning flavour (old value before the attempt),
// for 1-, 2-, 4- and 8-byte operands, then stresses the 4-byte CAS from two
// threads advancing two counters in lock-step. x86/x86_64 only.
// Returns the number of failed checks (0 on success).
int main() {
int ret = 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
bool r;
// per width: *0 = wrong expected value (CAS must fail), *1 = initial/expected
// value, *2 = desired value, *o = the shared object, *c = captured old value
char c0 = 1;
char c1 = 2;
char c2 = 3;
char co = 2;
char cc = 0;
short s0 = 11;
short s1 = 12;
short s2 = 13;
short so = 12;
short sc = 0;
int i0 = 211;
int i1 = 212;
int i2 = 213;
int io = 212;
int ic = 0;
long long l0 = 3111;
long long l1 = 3112;
long long l2 = 3113;
long long lo = 3112;
long long lc = 0;
// initialize OpenMP runtime library
omp_set_dynamic(0);
// #pragma omp atomic compare update capture
// { r = x == e; if(r) { x = d; } }
// char, co == c1 initially, co == c2 finally
r = __kmpc_atomic_bool_1_cas(NULL, 0, &co, c0, c2); // no-op
if (co != c1) {
ret++; printf("Error bool_1_cas no-op: %d != %d\n", co, c1); }
if (r) { ret++; printf("Error bool_1_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_1_cas(NULL, 0, &co, c1, c2);
if (co != c2) {
ret++; printf("Error bool_1_cas: %d != %d\n", co, c2); }
if (!r) { ret++; printf("Error bool_1_cas ret: %d\n", r); }
// short
r = __kmpc_atomic_bool_2_cas(NULL, 0, &so, s0, s2); // no-op
if (so != s1) {
ret++; printf("Error bool_2_cas no-op: %d != %d\n", so, s1); }
if (r) { ret++; printf("Error bool_2_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_2_cas(NULL, 0, &so, s1, s2);
if (so != s2) {
ret++; printf("Error bool_2_cas: %d != %d\n", so, s2); }
if (!r) { ret++; printf("Error bool_2_cas ret: %d\n", r); }
// int
r = __kmpc_atomic_bool_4_cas(NULL, 0, &io, i0, i2); // no-op
if (io != i1) {
ret++; printf("Error bool_4_cas no-op: %d != %d\n", io, i1); }
if (r) { ret++; printf("Error bool_4_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_4_cas(NULL, 0, &io, i1, i2);
if (io != i2) {
ret++; printf("Error bool_4_cas: %d != %d\n", io, i2); }
if (!r) { ret++; printf("Error bool_4_cas ret: %d\n", r); }
// long long
r = __kmpc_atomic_bool_8_cas(NULL, 0, &lo, l0, l2); // no-op
if (lo != l1) {
ret++; printf("Error bool_8_cas no-op: %lld != %lld\n", lo, l1); }
if (r) { ret++; printf("Error bool_8_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_8_cas(NULL, 0, &lo, l1, l2);
if (lo != l2) {
ret++; printf("Error bool_8_cas: %lld != %lld\n", lo, l2); }
if (!r) { ret++; printf("Error bool_8_cas ret: %d\n", r); }
// #pragma omp atomic compare update capture
// { v = x; if (x == e) { x = d; } }
// char, co == c2 initially, co == c1 finally
// the val_* flavour always captures the OLD value, swap or not
cc = __kmpc_atomic_val_1_cas(NULL, 0, &co, c0, c1); // no-op
if (co != c2) {
ret++; printf("Error val_1_cas no-op: %d != %d\n", co, c2); }
if (cc != c2) {
ret++; printf("Error val_1_cas no-op ret: %d != %d\n", cc, c2); }
cc = __kmpc_atomic_val_1_cas(NULL, 0, &co, c2, c1);
if (co != c1) {
ret++; printf("Error val_1_cas: %d != %d\n", co, c1); }
if (cc != c2) { ret++; printf("Error val_1_cas ret: %d != %d\n", cc, c2); }
// short
sc = __kmpc_atomic_val_2_cas(NULL, 0, &so, s0, s1); // no-op
if (so != s2) {
ret++; printf("Error val_2_cas no-op: %d != %d\n", so, s2); }
if (sc != s2) {
ret++; printf("Error val_2_cas no-op ret: %d != %d\n", sc, s2); }
sc = __kmpc_atomic_val_2_cas(NULL, 0, &so, s2, s1);
if (so != s1) {
ret++; printf("Error val_2_cas: %d != %d\n", so, s1); }
if (sc != s2) {
ret++; printf("Error val_2_cas ret: %d != %d\n", sc, s2); }
// int
ic = __kmpc_atomic_val_4_cas(NULL, 0, &io, i0, i1); // no-op
if (io != i2) {
ret++; printf("Error val_4_cas no-op: %d != %d\n", io, i2); }
if (ic != i2) {
ret++; printf("Error val_4_cas no-op ret: %d != %d\n", ic, i2); }
ic = __kmpc_atomic_val_4_cas(NULL, 0, &io, i2, i1);
if (io != i1) {
ret++; printf("Error val_4_cas: %d != %d\n", io, i1); }
if (ic != i2) {
ret++; printf("Error val_4_cas ret: %d != %d\n", ic, i2); }
// long long
lc = __kmpc_atomic_val_8_cas(NULL, 0, &lo, l0, l1); // no-op
if (lo != l2) {
ret++; printf("Error val_8_cas no-op: %lld != %lld\n", lo, l2); }
if (lc != l2) {
ret++; printf("Error val_8_cas no-op ret: %lld != %lld\n", lc, l2); }
lc = __kmpc_atomic_val_8_cas(NULL, 0, &lo, l2, l1);
if (lo != l1) {
ret++; printf("Error val_8_cas: %lld != %lld\n", lo, l1); }
if (lc != l2) {
ret++; printf("Error val_8_cas ret: %lld != %lld\n", lc, l2); }
// check in parallel
// i0 stays exactly one ahead of i1; five rounds end with i0==6, i1==5
i0 = 1;
i1 = 0;
for (io = 0; io < 5; ++io) {
#pragma omp parallel num_threads(2) private(i2, ic, r)
{
if (omp_get_thread_num() == 0) {
// th0 waits for th1 to increment i1, then th0 increments i0
#pragma omp atomic read
i2 = i1;
ic = __kmpc_atomic_val_4_cas(NULL, 0, &i0, i2, i2 + 1);
while(ic != i2) {
#pragma omp atomic read
i2 = i1;
ic = __kmpc_atomic_val_4_cas(NULL, 0, &i0, i2, i2 + 1);
}
} else {
// th1 increments i1 if it is equal to i0 - 1, letting th0 to proceed
r = 0;
while(!r) {
#pragma omp atomic read
i2 = i0;
r = __kmpc_atomic_bool_4_cas(NULL, 0, &i1, i2 - 1, i2);
}
}
}
}
if (i0 != 6 || i1 != 5) {
ret++;
printf("Error in parallel, %d != %d or %d != %d\n", i0, 6, i1, 5);
}
if (ret == 0)
printf("passed\n");
#else
printf("Unsupported architecture, skipping test...\n");
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
return ret;
}
|
exact_rhs-brisbane.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header-brisbane.h"
//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
void exact_rhs()
{
  // Build the forcing (right-hand side) term from the known exact solution:
  // for each of the three coordinate directions, evaluate the exact solution
  // along a pencil, form flux differences plus fourth-order dissipation, and
  // accumulate into forcing[m][k][j][i].  Finally negate and push to device.
  //
  // Working arrays (globals, reused per pencil):
  //   ue[s][m]  - exact solution component m at pencil position s
  //   buf[s][m] - velocity components (m=1..3) = ue[m]/ue[0]; buf[s][0] holds
  //               the squared-velocity sum
  //   cuf[s]    - square of the velocity component normal to the pencil
  //   q[s]      - 0.5 * (rho*u^2 + rho*v^2 + rho*w^2) / rho  (dynamic term)
  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

  //---------------------------------------------------------------------
  // initialize
  //---------------------------------------------------------------------
  for (k = 0; k <= grid_points[2]-1; k++) {
    for (j = 0; j <= grid_points[1]-1; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = 0.0;
        }
      }
    }
  }

  //---------------------------------------------------------------------
  // xi-direction flux differences
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (j = 1; j <= grid_points[1]-2; j++) {
      eta = (double)j * dnym1;

      // Fill the pencil (including boundary points i=0 and i=N-1) with the
      // exact solution and derived quantities.
      for (i = 0; i <= grid_points[0]-1; i++) {
        xi = (double)i * dnxm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[i][m] = dtemp[m];
        }
        dtpp = 1.0 / dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[i][m] = dtpp * dtemp[m];
        }
        cuf[i] = buf[i][1] * buf[i][1];
        buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
        q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
                    buf[i][3]*ue[i][3]);
      }

      // Central second-order flux differences at interior points.
      for (i = 1; i <= grid_points[0]-2; i++) {
        im1 = i-1;
        ip1 = i+1;

        forcing[0][k][j][i] = forcing[0][k][j][i] -
          tx2*( ue[ip1][1]-ue[im1][1] )+
          dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

        forcing[1][k][j][i] = forcing[1][k][j][i] - tx2 * (
            (ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
            (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
          xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
          dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ue[im1][1]);

        forcing[2][k][j][i] = forcing[2][k][j][i] - tx2 * (
            ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
          xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
          dx3tx1*( ue[ip1][2]-2.0*ue[i][2] +ue[im1][2]);

        forcing[3][k][j][i] = forcing[3][k][j][i] - tx2*(
            ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
          xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
          dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

        forcing[4][k][j][i] = forcing[4][k][j][i] - tx2*(
            buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
            buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
          0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
          xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
          xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
          dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      // (one-sided stencils near the boundaries, full 5-point stencil inside)
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        i = 1;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
        i = 2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
            4.0*ue[i+1][m] + ue[i+2][m]);
      }

      for (i = 3; i <= grid_points[0]-4; i++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = forcing[m][k][j][i] - dssp*
            (ue[i-2][m] - 4.0*ue[i-1][m] +
             6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        i = grid_points[0]-3;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] +
           6.0*ue[i][m] - 4.0*ue[i+1][m]);
        i = grid_points[0]-2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // eta-direction flux differences (same structure as xi, pencil along j;
  // buf[.][2] is now the pencil-normal velocity)
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    zeta = (double)k * dnzm1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;

      for (j = 0; j <= grid_points[1]-1; j++) {
        eta = (double)j * dnym1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[j][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[j][m] = dtpp * dtemp[m];
        }
        cuf[j] = buf[j][2] * buf[j][2];
        buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
        q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
                    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j <= grid_points[1]-2; j++) {
        jm1 = j-1;
        jp1 = j+1;

        forcing[0][k][j][i] = forcing[0][k][j][i] -
          ty2*( ue[jp1][2]-ue[jm1][2] )+
          dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

        forcing[1][k][j][i] = forcing[1][k][j][i] - ty2*(
            ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
          yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
          dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

        forcing[2][k][j][i] = forcing[2][k][j][i] - ty2*(
            (ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
            (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
          yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
          dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

        forcing[3][k][j][i] = forcing[3][k][j][i] - ty2*(
            ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
          yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
          dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

        forcing[4][k][j][i] = forcing[4][k][j][i] - ty2*(
            buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
            buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
          0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
                      buf[jm1][0])+
          yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
          yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
          dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        j = 1;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
        j = 2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
            4.0*ue[j+1][m] + ue[j+2][m]);
      }

      for (j = 3; j <= grid_points[1]-4; j++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = forcing[m][k][j][i] - dssp*
            (ue[j-2][m] - 4.0*ue[j-1][m] +
             6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        j = grid_points[1]-3;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] +
           6.0*ue[j][m] - 4.0*ue[j+1][m]);
        j = grid_points[1]-2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // zeta-direction flux differences (pencil along k; buf[.][3] is the
  // pencil-normal velocity)
  //---------------------------------------------------------------------
  for (j = 1; j <= grid_points[1]-2; j++) {
    eta = (double)j * dnym1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      xi = (double)i * dnxm1;

      for (k = 0; k <= grid_points[2]-1; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, dtemp);
        for (m = 0; m < 5; m++) {
          ue[k][m] = dtemp[m];
        }
        dtpp = 1.0/dtemp[0];
        for (m = 1; m < 5; m++) {
          buf[k][m] = dtpp * dtemp[m];
        }
        cuf[k] = buf[k][3] * buf[k][3];
        buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
        q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
                    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k <= grid_points[2]-2; k++) {
        km1 = k-1;
        kp1 = k+1;

        forcing[0][k][j][i] = forcing[0][k][j][i] -
          tz2*( ue[kp1][3]-ue[km1][3] )+
          dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

        forcing[1][k][j][i] = forcing[1][k][j][i] - tz2 * (
            ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
          zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
          dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

        forcing[2][k][j][i] = forcing[2][k][j][i] - tz2 * (
            ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
          zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
          dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

        forcing[3][k][j][i] = forcing[3][k][j][i] - tz2 * (
            (ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
            (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
          zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
          dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

        forcing[4][k][j][i] = forcing[4][k][j][i] - tz2 * (
            buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
            buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
          0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]+buf[km1][0])+
          zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
          zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
          dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

      //---------------------------------------------------------------------
      // Fourth-order dissipation
      //---------------------------------------------------------------------
      for (m = 0; m < 5; m++) {
        k = 1;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
        k = 2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
            4.0*ue[k+1][m] + ue[k+2][m]);
      }

      for (k = 3; k <= grid_points[2]-4; k++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = forcing[m][k][j][i] - dssp*
            (ue[k-2][m] - 4.0*ue[k-1][m] +
             6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
        }
      }

      for (m = 0; m < 5; m++) {
        k = grid_points[2]-3;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] +
           6.0*ue[k][m] - 4.0*ue[k+1][m]);
        k = grid_points[2]-2;
        forcing[m][k][j][i] = forcing[m][k][j][i] - dssp *
          (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

  //---------------------------------------------------------------------
  // now change the sign of the forcing function,
  //---------------------------------------------------------------------
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 5; m++) {
          forcing[m][k][j][i] = -1.0 * forcing[m][k][j][i];
        }
      }
    }
  }

  // Copy the finished forcing array to the accelerator.  NOTE(review): both
  // an OpenMP "target update" and an explicit Brisbane h2d task are issued
  // here — presumably only one is active per build configuration; confirm
  // against the runtime used by header-brisbane.h.
  #pragma omp target update to(forcing)
  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_h2d_full(task0, mem_forcing, forcing);
  brisbane_task_submit(task0, brisbane_default, NULL, true);
}
|
kondoimpti.h | /*
Copyright (c) 2011, Florian Goth
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY <copyright holder> ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef KONDOIMPTI_H
#define KONDOIMPTI_H
#include <complex>
#include <fstream>
#include "Greensfunction.h"
#include "Vertex.h"
#include "MTL/Matrix.h"
#include <limits>
#include "MTL/CommonTypes.h"
/**
some class for the Kondo Impurity in a 2D TI bath
*/
// Data for a single bath eigenstate at one (k-point, band) index.
template <typename FPType>
struct GOmegaData
{
    FPType lambda;              // eigenvalue of this bath state
    std::complex<FPType> u;     // projection of the eigenvector onto the first component (see KondoImpTI::init)
    std::complex<FPType>* evec; // full eigenvector, new[]-allocated; freed by ~G_TI_Omega
};
/**
Free impurity Green's function in Matsubara frequencies for the 2D-TI bath.
On construction it tabulates G(i*omega_m) for the first omega_max positive
fermionic frequencies (separately for +omega and -omega); operator() then
looks the values up and falls back to the 1/(i*omega) tail beyond the table.
*/
template<typename FPType_ = float>
class G_TI_Omega
{
public:
    typedef FPType_ FPType;
    typedef std::complex<FPType> RetType;
    typedef RetType value_type;
    enum {
        has_real_FourierTransform = false
    };
    /**
    Tabulate the Green's function.
    @param mydat bath eigenstate table of size l*ks; ownership is taken (freed in the destructor)
    @param l     number of k-points
    @param ks    number of bands per k-point
    @param beta  inverse temperature
    @param v_    hybridization strength
    @param ed_   impurity (dot) level
    */
    G_TI_Omega(const GOmegaData<FPType> *const mydat, uint l, uint ks, FPType beta, FPType v_, FPType ed_) : data(mydat), v(v_), ed(ed_), betaoverpi(beta / M_PI), ksize(ks), len(l)
    {
        gomegaplus = new RetType[omega_max];
        gomegaminus = new RetType[omega_max];
        // Each frequency is independent -> trivially parallel.
#pragma omp parallel for
        for (uint m = 0; m < omega_max; ++m)
        {
            // fermionic Matsubara frequency i*(2m+1)*pi/beta
            std::complex<FPType> omegam = std::complex<FPType>(0.0, (2*m+1)*M_PI/beta);
            RetType sumplus = 0.0;
            RetType summinus = 0.0;
            // hybridization function: k-averaged sum over bath states
            for (uint k = 0; k < len; ++k)
                for (uint n = 0; n < ksize; ++n)
                {
                    sumplus += norm(data[k*ksize + n].u)/(omegam - data[k*ksize + n].lambda);
                    summinus += norm(data[k*ksize + n].u)/(-omegam - data[k*ksize + n].lambda);
                }
            sumplus /= len;
            summinus /= len;
            gomegaplus[m] = 1.0/(-omegam - ed - v*v*conj(sumplus));
            gomegaminus[m] = 1.0/(omegam - ed - v*v*conj(summinus));
        }
        return;
    }
    /**
    Look up G at a real-valued frequency omegan (rounded to the nearest
    Matsubara index); negative frequencies use the minus-branch table.
    Beyond the table the asymptotic tail 1/(i*omegan) is returned.
    */
    inline RetType operator()(FPType omegan) const
    {
        RetType* g = gomegaplus;
        if (omegan < 0)
        {
            omegan = -omegan;
            g = gomegaminus;
        }
        // invert omega_m = (2m+1)*pi/beta:  m = (omega*beta/pi - 1)/2
        long int idx = lround(omegan*betaoverpi);
        --idx;
        idx/=2;
        if (idx < omega_max) // NOTE(review): signed/unsigned compare; idx is non-negative here, so it is benign
            return g[idx];
        else return RetType (0.0, 1.0/omegan);
        /* if(omegan > 0)//Wide Band Limit
            return static_cast<FPType>(1.0)/std::complex<FPType>(-ed, -omegan - 0.75*0.75/4.0*M_PI);
        else
            return static_cast<FPType>(1.0)/std::complex<FPType>(-ed, -omegan + 0.75*0.75/4.0*M_PI);*/
    }
    /**
    Look up G by integer Matsubara index; negative indices map to the
    minus-branch table (index -n-1, i.e. -1 -> 0).
    */
    inline RetType operator()(int omegan) const
    {
        RetType* g = gomegaplus;
        if (omegan < 0)
        {
            omegan = -omegan-1;
            g = gomegaminus;
        }
        if (omegan < static_cast<int>(omega_max))
            return g[omegan];
        else return RetType (0.0, 1.0/((2*omegan + 1)/betaoverpi));
    }
    // Frees the frequency tables and the bath-state data (including every evec).
    ~G_TI_Omega()
    {
        delete [] gomegaplus;
        delete [] gomegaminus;
        for (uint k = 0; k < len; ++k)
            for (uint n = 0; n < ksize; ++n)
                delete [] data[k*ksize + n].evec;
        delete [] data;
    }
    uint kpoints() const {return ksize;}
    const GOmegaData<FPType> *const data; // bath eigenstate table, owned by this object
private:
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && (GCC_VERSION > GCC_VER(4,5,0))
    static constexpr uint omega_max = 40000; // number of tabulated Matsubara frequencies
#else
    static const uint omega_max = 40000;
#endif
    FPType v;             // hybridization
    FPType ed;            // impurity level
    RetType* gomegaplus;  // table for positive frequencies
    RetType* gomegaminus; // table for negative frequencies
    FPType betaoverpi;    // beta/pi, cached for index conversion
    uint ksize;           // bands per k-point
    uint len;             // number of k-points
};
/**
Model class for a Kondo impurity coupled to a 2D topological-insulator bath.
Provides the free Green's function in imaginary time (eval) and in Matsubara
frequencies (gomega), plus setup/teardown of the static lookup tables.
*/
template<typename FPType = float>
class KondoImpTI
{
public:
    enum {timeevolution = 0,
          has_Spin_symmetry = false,
          has_Giomega = true,
          Is_Impurity_model = true,
          has_TRS = true,
         };
    typedef Basic_Vertex<FPType> Vertex;
    typedef std::complex<FPType> FreeGreensFunctionReturnValueType;///< a typedef of the data type of the free greensfunction
    typedef G_TI_Omega<FPType> GOmega;
    typedef typename GOmega::RetType GOmegaRetType;
    /**
    This evaluates the value of the free particle Greens-function for the two given vertices
    @param v1 the first vertex
    @param v2 the second vertex
    @return the value of the free greensfunction evaluated with the given vertices
    */
    static inline FreeGreensFunctionReturnValueType eval(const Vertex& v1, const Vertex& v2) throw();
    /**
    A function for initializing the tables that make up the Greensfunction. the necessary parameters are read from the parameter structs.
    */
    template <class CFG>
    static inline void init(CFG&);
    /**
    This function frees the memory used by the tables for the Greensfunction
    */
    static inline void tidyup();
    /**
    To access the nr of atoms.(In the SIAM this is mostly for compatability)
    @return the nr of atoms in the chain
    */
    static inline unsigned int getLen() throw()
    {
        return 1;
    }
    /**
    Get the length of the interactioninterval.
    @return the inverse temperature beta
    */
    static FPType getContourLen() throw()
    {
        return beta;
    }
    /**
    The free particle Greens function in Matsubara frequencies.
    Vanishes for opposite spins; otherwise dispatches to the table of the
    matching spin species.
    NOTE(review): the return type is taken from G_SIAM_Omega (declared in
    another header) while the tables are G_TI_Omega — presumably both have
    the same RetType (std::complex<FPType>); confirm.
    */
    inline static typename G_SIAM_Omega<FPType>::RetType gomega(FPType om, Vertex& v1, Vertex& v2) throw()
    {
        if (v1.spin != v2.spin) return 0.0;
        else
            return v1.spin == UP ? (*gomegaup)(om) : (*gomegadown)(om);
    }
    static G_TI_Omega<FPType>* gomegaup;   // frequency table, spin up
    static G_TI_Omega<FPType>* gomegadown; // frequency table, spin down
    static FPType gethybridization() {return v;}
private:
    static FreeGreensFunctionReturnValueType** g;///< the free particle greensfunction (g[spin][tau-slice])
    static unsigned int slices;///< the number of timeslices. this means the resolution of the free greensfunction in tau space
    static FPType betaslice;///<the length of one timeslice on the tau axis
    static FPType beta;///<the inverse temperature beta
    static FPType ed;///< dot-level
    static FPType my;///< chemical potential
    static FPType v;///< hybridization
};
// Out-of-line definitions of KondoImpTI's static members; pointers start as
// NULL, scalars are value-initialized by init().
template<typename FPType>
typename KondoImpTI<FPType>::FreeGreensFunctionReturnValueType** KondoImpTI<FPType>::g = NULL;
template <typename FPType>
G_TI_Omega<FPType>* KondoImpTI<FPType>::gomegaup = NULL;
template <typename FPType>
G_TI_Omega<FPType>* KondoImpTI<FPType>::gomegadown = NULL;
template<typename FPType>
FPType KondoImpTI<FPType>::betaslice;
template<typename FPType>
FPType KondoImpTI<FPType>::v;
template<typename FPType>
FPType KondoImpTI<FPType>::ed;
template<typename FPType>
FPType KondoImpTI<FPType>::beta;
template<typename FPType>
unsigned int KondoImpTI<FPType>::slices;
// Release everything init() allocated: the two tau-space tables, their
// holder array, and the frequency objects (whose destructors also free the
// bath-state data).
template <typename FPType>
void KondoImpTI<FPType>::tidyup()
{
    delete [] g[0];
    delete [] g[1];
    delete [] g;
    delete gomegaup;
    delete gomegadown;
}
/**
Build the free Green's function tables for both spin species.
For each spin it assembles the Bloch Hamiltonian of the TI bath at every
k-point, diagonalizes it, stores the eigenvalues/eigenvectors in a
GOmegaData table, constructs the Matsubara-frequency Green's function from
it, and Fourier-transforms that onto an imaginary-time grid of `slices`
intervals.  Diagnostic data is written to summen.txt, energies.txt,
gdown.txt and gup.txt.
@param curparams parameter struct providing V, ed, Nx, Nb, lambda, beta
*/
template <typename FPType>
template <class CFG>
void KondoImpTI<FPType>::init(CFG& curparams)
{
    v = curparams.V;
    ed = curparams.ed;
    uint Nx = curparams.Nx;       // number of k-points
    uint Nb = curparams.Nb;       // number of unit cells in the transverse direction
    uint bsize = 2;               // two orbitals per cell
    uint ksize = Nb * bsize;      // Hamiltonian dimension per k-point
    uint size = Nx * ksize;       // NOTE(review): currently unused
    FPType lambda = curparams.lambda; // spin-orbit coupling strength
    beta = curparams.beta;
    slices = 10000;//Number of TimeSlices
    const unsigned int slicesp = slices + 1;
    betaslice = beta / static_cast<FPType>(slices);
    g = new FreeGreensFunctionReturnValueType*[2];
    G_TI_Omega<FPType>* gomegaarr[2];
    std::ofstream summen("summen.txt");
    std::ofstream gdownfile("gdown.txt");
    std::ofstream gupfile("gup.txt");
    std::ofstream energies("energies.txt");
    // s = 0: spin down (sigma = -1), s = 1: spin up (sigma = +1)
    for (int s = 0; s < 2; ++s)
    {
        // data is handed over to G_TI_Omega below, which takes ownership
        GOmegaData<FPType>* data = new GOmegaData<FPType>[Nx*ksize];
        g[s] = new FreeGreensFunctionReturnValueType[slicesp];
        int sigma = 2*s - 1;
        for (uint kidx = 0; kidx < Nx; ++kidx)
        {
            FPType k = kidx*2.0*M_PI/Nx;
            summen<<k<<std::endl;
            // phase factors e^{ik/2} and e^{ik} entering the hopping terms
            std::complex<FPType> expkhalf = std::exp(std::complex<FPType>(0.0, kidx*M_PI/Nx));
            std::complex<FPType> expk = exp(std::complex<FPType>(0.0, k));
            std::complex<FPType> cexpk = conj(expk);
            FPType ls = -2.0 *lambda*sigma;       // spin-dependent SO prefactor
            FPType sinklambda = imag(expk)* ls;   // diagonal SO term ~ sin(k)
            FPType cosfack = 2.0*std::real(expkhalf)/*cos(kidx*M_PI/Nx)*/;
            FPType sinfack = std::imag(expkhalf);
            std::complex<FPType> lambdaentry = - sinfack * conj(expkhalf)*ls;
            std::complex<FPType> clambdaentry = conj(lambdaentry);
            std::complex<FPType> entry = cosfack*expkhalf;//= 1 + expk
            std::complex<FPType> centry = conj(entry);//cosfack*cexpk;// = 1 + cexpk
            // Bloch Hamiltonian at (k, sigma), initialized to zero
            CmplxMat hzero_k_sigma(ksize, ksize, MTLICCL::Identity<FPType>(0.0, 0.0));
            // CmplxMat hlambda_k_sigma(ksize, ksize, MTLICCL::Identity<FPType>(0.0, 0.0));
            for (uint b = 0; b < Nb; ++b)
            {
                // intra-cell hopping between the two orbitals
                // hzero_k_sigma(b*bsize + 0, b *bsize + 0) = 0.1;
                // hzero_k_sigma(b*bsize + 1, b *bsize + 1) = 0.1;
                hzero_k_sigma(b*bsize + 1, b *bsize + 0) = -centry;
                hzero_k_sigma(b*bsize + 0, b *bsize + 1) = -entry;
                if (b != 0)
                {
                    // inter-cell hopping to the previous cell
                    hzero_k_sigma((b-1)*bsize + 1, b *bsize + 0) = -cexpk;
                    hzero_k_sigma(b*bsize + 0, (b-1) *bsize + 1) = -expk;
                    //Now the Spin Orbit parts
                    hzero_k_sigma((b-1)*bsize + 0, b *bsize + 0) = lambdaentry;
                    hzero_k_sigma(b*bsize + 0, (b-1) *bsize + 0) = clambdaentry;
                    hzero_k_sigma((b-1)*bsize + 1, b *bsize + 1) = -lambdaentry;
                    hzero_k_sigma(b*bsize + 1, (b-1) *bsize + 1) = -clambdaentry;
                }
                // spin-orbit diagonal, opposite sign on the two orbitals
                hzero_k_sigma(b * bsize + 0, b * bsize + 0) = sinklambda;
                hzero_k_sigma(b * bsize + 1, b * bsize + 1) = -sinklambda;
            }
            // std::cout<<(std::complex<double>(lambda*sigma,0.0)*hlambda_k_sigma-hzero_k_sigma)<<std::endl;
            // CmplxMat fullmat(/*std::complex<double>(lambda*sigma,0.0)*hlambda_k_sigma -*/ hzero_k_sigma);
            // Diagonalize: zcplx holds the tridiagonalization transform so the
            // final eigenvectors are zcplx * esystemcplx[n].evector.
            MTLICCL::EigenSystemSolver<CmplxMat> ess2(hzero_k_sigma);
            ess2.calculateEVs();
            CmplxMat zcplx(ess2.tridiagonalize());
            std::vector<MTLICCL::SolutionPair<double> > esystemcplx = ess2.eigensystem();
            for (uint n = 0; n < ksize; ++n)
            {
                data[kidx * ksize + n].lambda = esystemcplx[n].evalue;
                data[kidx * ksize + n].evec = new std::complex<FPType>[ksize];
                energies<<k<<" "<<esystemcplx[n].evalue<<std::endl;
                // u = first component of the back-transformed eigenvector
                data[kidx * ksize + n].u = 0.0;
                for (uint q = 0; q < ksize; ++q)
                {
                    data[kidx * ksize + n].u += zcplx(0, q)*esystemcplx[n].evector[q];
                }
                // full back-transformed eigenvector (new[] of std::complex
                // value-initializes to zero, so += accumulates correctly)
                for(uint i = 0; i < ksize; ++i)
                    for(uint q = 0; q < ksize; ++q)
                    {
                        data[kidx * ksize + n].evec[i] += zcplx(i, q)*esystemcplx[n].evector[q];
                    }
            }
            // Sanity check: the assembled eigenvector matrix should be unitary;
            // report off-diagonal row sums of U^dagger * U to summen.txt.
            CmplxMat evscplx(zcplx.Rows(), zcplx.Rows());
            for (uint i = 0; i < zcplx.Rows(); ++i)
                for (uint j = 0; j < zcplx.Rows(); ++j)
                {
                    evscplx(j, i) = esystemcplx[i].evector[j];
                }
            // std::cout<<zcplx*evscplx<<std::endl;
            CmplxMat zhc(~(zcplx*evscplx));
            for (uint j = 0; j < zhc.Rows(); ++j)
                for (uint i = 0; i < zhc.Columns(); ++i)
                    zhc(j,i) = conj(zhc(j,i));
            CmplxMat id(zhc*(zcplx*evscplx));
            for (uint j = 0; j < id.Rows(); ++j)
            {
                double sum = 0.0;
                for (uint i = 0; i < id.Rows(); ++i) {
                    if (i!=j)
                        sum += abs(id(i,j));
                }
                std::cout.precision(12);
                summen<<"Zeilensumme "<<j<<" : "<<std::scientific<<sum<<" + "<<std::abs(id(j,j))<<std::endl;
            }
        }
        energies<<"&"<<std::endl;
        std::cout<<"finished k-sum"<<std::endl;
        gomegaarr[s] = new G_TI_Omega<FPType>(data, Nx, ksize, beta, curparams.V, curparams.ed);
        std::cout<<"finished gomega"<<std::endl;
        // tabulate G(tau) on slicesp points from the frequency representation
        matsubarafouriertransform(*(gomegaarr[s]), g[s], beta, betaslice, slicesp, slices);
        for (uint k = 0; k < slicesp; ++k)
            (s == 0? gdownfile: gupfile)<<k*betaslice<<" "<<std::scientific<<real(g[s][k])<<std::endl;
        // exit(-1);
    }
    gomegadown = gomegaarr[0];
    gomegaup = gomegaarr[1];
    return;
}
/**
Evaluate the free Green's function for two vertices in imaginary time.
Zero for opposite spins; otherwise delta_tau = tau1 - tau2 is folded into
[0, beta] with a sign flip per period (fermionic antiperiodicity) and the
spin-matching tau-table is sampled at the nearest-lower slice.
*/
template<typename FPType>
typename KondoImpTI<FPType>::FreeGreensFunctionReturnValueType KondoImpTI<FPType>::eval(const Vertex& v1, const Vertex& v2) throw()
{
    const FPType tiny = std::numeric_limits<FPType>::epsilon();
    //determine the Differences between the two
    FPType delta_tau = v1.tau - v2.tau;
    if (v1.spin != v2.spin) return 0.0;
    const FreeGreensFunctionReturnValueType *const glocal = (v1.spin == UP ? g[1] : g[0]);
    //Take care of negative values: fold into [0, beta], tracking sign flips
    uint signchanges = 0;
    while (delta_tau < 0)
    {
        delta_tau += beta;
        ++signchanges;
    }
    while (delta_tau > beta)
    {
        delta_tau -= beta;
        ++signchanges;
    }
    FPType sign = (signchanges & 1? -1.0 : 1.0);
    if (std::abs(delta_tau) < tiny)
    {
        //return only the particle number
        return sign*glocal[0];
    }
    if (std::abs(delta_tau - beta) < tiny)
        return sign*glocal[slices];
    FPType fptau_idx0;
    // round to the smaller index and determine how far we're off
    // NOTE(review): rem is only needed by the commented-out interpolation
    // below; the active code does nearest-lower-slice lookup only.
    FPType rem = std::modf(delta_tau/betaslice, &fptau_idx0);
    long int tau_idx0 = lround(fptau_idx0);
    return sign * glocal[tau_idx0];
    // return lerp(rem, glocal[tau_idx0], glocal[tau_idx0 + 1]) * sign;//return the value of the greensfunction
}
#endif // KONDOIMPTI_H
|
GB_binop__bshift_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_uint8
// A.*B function (eWiseMult): GB_AemultB__bshift_uint8
// A*D function (colscale): (none)
// D*A function (rowscale):     (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_uint8
// C+=b function (dense accum): GB_Cdense_accumb__bshift_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint8
// C=scalar+B GB_bind1st__bshift_uint8
// C=scalar+B' GB_bind1st_tran__bshift_uint8
// C=A+scalar GB_bind2nd__bshift_uint8
// C=A'+scalar GB_bind2nd_tran__bshift_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_bitshift_uint8 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the included template applies
// GB_BINOP (bitshift) elementwise using nthreads threads.
GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the slice arrays partition B's
// entries over ntasks tasks for the included template.
GrB_Info GB_Cdense_accumB__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b for a dense C and scalar b (passed type-erased via p_bwork).
GrB_Info GB_Cdense_accumb__bshift_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; the generator emits this trailing return as a guard
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no row-scale kernel exists for the BSHIFT operator.  The
// placeholder name is "(none)", matching the colscale placeholder above and
// the "(none)" entries in this file's header comment; "(node)" was a typo.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the output array; the included template computes C = D*B
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, applying bitshift where both entries are
// present; C_to_* map C's vectors to those of M, A, and B, and TaskList
// partitions the work.
GrB_Info GB_AaddB__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying bitshift on the intersection
// of the patterns of A and B.
GrB_Info GB_AemultB__bshift_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for all p, with the scalar x bound as the
// first operand.  Cx and Bx may be aliased, so no restrict qualifiers.
GrB_Info GB_bind1st__bshift_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *out = (uint8_t *) Cx_output ;
    uint8_t lhs = (*((uint8_t *) x_input)) ;
    int8_t *shifts = (int8_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        out [k] = GB_bitshift_uint8 (lhs, shifts [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for all p, with the scalar y bound as the
// second operand.  Cx and Ax may be aliased, so no restrict qualifiers.
GrB_Info GB_bind2nd__bshift_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *out = (uint8_t *) Cx_output ;
    uint8_t *vals = (uint8_t *) Ax_input ;
    int8_t shift = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        out [k] = GB_bitshift_uint8 (vals [k], shift) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}
// C = bitshift (x, A'): transpose A while applying the operator with the
// scalar x bound as the first operand.  The transpose machinery lives in
// GB_unop_transpose.c, driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__bshift_uint8
(
    GrB_Matrix C,                          // output matrix (transposed A)
    const GB_void *x_input,                // scalar x (uint8_t)
    const GrB_Matrix A,                    // input matrix of shift counts
    int64_t *GB_RESTRICT *Rowcounts,       // per-slice row counts workspace
    GBI_single_iterator Iter,              // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice,    // how A is sliced across threads
    int naslice                            // number of slices of A
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // Temporarily make GB_ATYPE the shift-count type (int8_t):
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    // kernel compiled out
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // Restore GB_ATYPE to uint8_t for subsequent kernels in this file
    // (preprocessor directives take effect regardless of control flow).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}
// C = bitshift (A', y): transpose A while applying the operator with the
// scalar y (the signed shift count) bound as the second operand.  The
// transpose machinery lives in GB_unop_transpose.c, driven by the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__bshift_uint8
(
    GrB_Matrix C,                          // output matrix (transposed A)
    const GrB_Matrix A,                    // input matrix of uint8_t values
    const GB_void *y_input,                // scalar shift count (int8_t)
    int64_t *GB_RESTRICT *Rowcounts,       // per-slice row counts workspace
    GBI_single_iterator Iter,              // iterator over A's vectors
    const int64_t *GB_RESTRICT A_slice,    // how A is sliced across threads
    int naslice                            // number of slices of A
)
{
    #if GB_DISABLE
    // kernel compiled out
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
x_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB BT code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
#include "work_lhs.h"
#include "timers.h"
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
// Solve the block-tridiagonal systems along the x (xi) direction.
// For each (k,j) pencil: build the 5x5 flux (fjac) and viscous (njac)
// Jacobians at every i, assemble the AA/BB/CC block bands of the LHS,
// eliminate forward with block Gaussian elimination, then back-substitute
// into rhs.  Arrays u, rhs, rho_i, qs, square and the work arrays
// fjac, njac, lhs come from header.h / work_lhs.h.
void x_solve()
{
  int i, j, k, m, n, isize;

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------
  if (timeron) timer_start(t_xsolve);

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // This function computes the left hand side in the xi-direction
  //---------------------------------------------------------------------
  isize = grid_points[0]-1;   // last i index of the sweep

  //---------------------------------------------------------------------
  // determine a (labeled f) and n jacobians
  //---------------------------------------------------------------------
  #pragma scop
  // NOTE(review): tmp1..tmp3, fjac, njac and lhs are file-scope
  // (work_lhs.h) and are not listed in the private clause -- presumably
  // they are declared threadprivate there; confirm, otherwise every
  // (k,j) iteration of this parallel loop races on them.
  #pragma omp parallel for default(shared) shared(isize) private(i,j,k,m,n)
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      // Jacobians for every point of this (k,j) pencil.
      for (i = 0; i <= isize; i++) {
        tmp1 = rho_i[k][j][i];   // 1/rho
        tmp2 = tmp1 * tmp1;      // 1/rho^2
        tmp3 = tmp1 * tmp2;      // 1/rho^3
        //-------------------------------------------------------------------
        // flux Jacobian d(F)/d(U) at point i
        //-------------------------------------------------------------------
        fjac[i][0][0] = 0.0;
        fjac[i][1][0] = 1.0;
        fjac[i][2][0] = 0.0;
        fjac[i][3][0] = 0.0;
        fjac[i][4][0] = 0.0;
        fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1])
          + c2 * qs[k][j][i];
        fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
        fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 );
        fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 );
        fjac[i][4][1] = c2;
        fjac[i][0][2] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;
        fjac[i][1][2] = u[k][j][i][2] * tmp1;
        fjac[i][2][2] = u[k][j][i][1] * tmp1;
        fjac[i][3][2] = 0.0;
        fjac[i][4][2] = 0.0;
        fjac[i][0][3] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2;
        fjac[i][1][3] = u[k][j][i][3] * tmp1;
        fjac[i][2][3] = 0.0;
        fjac[i][3][3] = u[k][j][i][1] * tmp1;
        fjac[i][4][3] = 0.0;
        fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
          * ( u[k][j][i][1] * tmp2 );
        fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1
          - c2 * ( u[k][j][i][1]*u[k][j][i][1] * tmp2 + qs[k][j][i] );
        fjac[i][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * tmp2;
        fjac[i][3][4] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * tmp2;
        fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 );
        //-------------------------------------------------------------------
        // viscous Jacobian at point i
        //-------------------------------------------------------------------
        njac[i][0][0] = 0.0;
        njac[i][1][0] = 0.0;
        njac[i][2][0] = 0.0;
        njac[i][3][0] = 0.0;
        njac[i][4][0] = 0.0;
        njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1];
        njac[i][1][1] = con43 * c3c4 * tmp1;
        njac[i][2][1] = 0.0;
        njac[i][3][1] = 0.0;
        njac[i][4][1] = 0.0;
        njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
        njac[i][1][2] = 0.0;
        njac[i][2][2] = c3c4 * tmp1;
        njac[i][3][2] = 0.0;
        njac[i][4][2] = 0.0;
        njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
        njac[i][1][3] = 0.0;
        njac[i][2][3] = 0.0;
        njac[i][3][3] = c3c4 * tmp1;
        njac[i][4][3] = 0.0;
        njac[i][0][4] = - ( con43 * c3c4
            - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
          - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
          - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
          - c1345 * tmp2 * u[k][j][i][4];
        njac[i][1][4] = ( con43 * c3c4
            - c1345 ) * tmp2 * u[k][j][i][1];
        njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
        njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
        njac[i][4][4] = ( c1345 ) * tmp1;
      }
      //---------------------------------------------------------------------
      // now jacobians set, so form left hand side in x direction
      //---------------------------------------------------------------------
      lhsinit(lhs, isize);   // identity blocks at i=0 and i=isize
      for (i = 1; i <= isize-1; i++) {
        tmp1 = dt * tx1;   // viscous (njac) weight
        tmp2 = dt * tx2;   // convective (fjac) weight
        // AA: sub-diagonal block, built from Jacobians at i-1
        lhs[i][AA][0][0] = - tmp2 * fjac[i-1][0][0]
          - tmp1 * njac[i-1][0][0]
          - tmp1 * dx1;
        lhs[i][AA][1][0] = - tmp2 * fjac[i-1][1][0]
          - tmp1 * njac[i-1][1][0];
        lhs[i][AA][2][0] = - tmp2 * fjac[i-1][2][0]
          - tmp1 * njac[i-1][2][0];
        lhs[i][AA][3][0] = - tmp2 * fjac[i-1][3][0]
          - tmp1 * njac[i-1][3][0];
        lhs[i][AA][4][0] = - tmp2 * fjac[i-1][4][0]
          - tmp1 * njac[i-1][4][0];
        lhs[i][AA][0][1] = - tmp2 * fjac[i-1][0][1]
          - tmp1 * njac[i-1][0][1];
        lhs[i][AA][1][1] = - tmp2 * fjac[i-1][1][1]
          - tmp1 * njac[i-1][1][1]
          - tmp1 * dx2;
        lhs[i][AA][2][1] = - tmp2 * fjac[i-1][2][1]
          - tmp1 * njac[i-1][2][1];
        lhs[i][AA][3][1] = - tmp2 * fjac[i-1][3][1]
          - tmp1 * njac[i-1][3][1];
        lhs[i][AA][4][1] = - tmp2 * fjac[i-1][4][1]
          - tmp1 * njac[i-1][4][1];
        lhs[i][AA][0][2] = - tmp2 * fjac[i-1][0][2]
          - tmp1 * njac[i-1][0][2];
        lhs[i][AA][1][2] = - tmp2 * fjac[i-1][1][2]
          - tmp1 * njac[i-1][1][2];
        lhs[i][AA][2][2] = - tmp2 * fjac[i-1][2][2]
          - tmp1 * njac[i-1][2][2]
          - tmp1 * dx3;
        lhs[i][AA][3][2] = - tmp2 * fjac[i-1][3][2]
          - tmp1 * njac[i-1][3][2];
        lhs[i][AA][4][2] = - tmp2 * fjac[i-1][4][2]
          - tmp1 * njac[i-1][4][2];
        lhs[i][AA][0][3] = - tmp2 * fjac[i-1][0][3]
          - tmp1 * njac[i-1][0][3];
        lhs[i][AA][1][3] = - tmp2 * fjac[i-1][1][3]
          - tmp1 * njac[i-1][1][3];
        lhs[i][AA][2][3] = - tmp2 * fjac[i-1][2][3]
          - tmp1 * njac[i-1][2][3];
        lhs[i][AA][3][3] = - tmp2 * fjac[i-1][3][3]
          - tmp1 * njac[i-1][3][3]
          - tmp1 * dx4;
        lhs[i][AA][4][3] = - tmp2 * fjac[i-1][4][3]
          - tmp1 * njac[i-1][4][3];
        lhs[i][AA][0][4] = - tmp2 * fjac[i-1][0][4]
          - tmp1 * njac[i-1][0][4];
        lhs[i][AA][1][4] = - tmp2 * fjac[i-1][1][4]
          - tmp1 * njac[i-1][1][4];
        lhs[i][AA][2][4] = - tmp2 * fjac[i-1][2][4]
          - tmp1 * njac[i-1][2][4];
        lhs[i][AA][3][4] = - tmp2 * fjac[i-1][3][4]
          - tmp1 * njac[i-1][3][4];
        lhs[i][AA][4][4] = - tmp2 * fjac[i-1][4][4]
          - tmp1 * njac[i-1][4][4]
          - tmp1 * dx5;
        // BB: diagonal block, I + 2*dt*tx1*njac(i) + dissipation
        lhs[i][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[i][0][0]
          + tmp1 * 2.0 * dx1;
        lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0];
        lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0];
        lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0];
        lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0];
        lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1];
        lhs[i][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[i][1][1]
          + tmp1 * 2.0 * dx2;
        lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1];
        lhs[i][BB][3][1] = tmp1 * 2.0 * njac[i][3][1];
        lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1];
        lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2];
        lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2];
        lhs[i][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[i][2][2]
          + tmp1 * 2.0 * dx3;
        lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2];
        lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2];
        lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3];
        lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3];
        lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3];
        lhs[i][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[i][3][3]
          + tmp1 * 2.0 * dx4;
        lhs[i][BB][4][3] = tmp1 * 2.0 * njac[i][4][3];
        lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4];
        lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4];
        lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4];
        lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4];
        lhs[i][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[i][4][4]
          + tmp1 * 2.0 * dx5;
        // CC: super-diagonal block, built from Jacobians at i+1
        lhs[i][CC][0][0] = tmp2 * fjac[i+1][0][0]
          - tmp1 * njac[i+1][0][0]
          - tmp1 * dx1;
        lhs[i][CC][1][0] = tmp2 * fjac[i+1][1][0]
          - tmp1 * njac[i+1][1][0];
        lhs[i][CC][2][0] = tmp2 * fjac[i+1][2][0]
          - tmp1 * njac[i+1][2][0];
        lhs[i][CC][3][0] = tmp2 * fjac[i+1][3][0]
          - tmp1 * njac[i+1][3][0];
        lhs[i][CC][4][0] = tmp2 * fjac[i+1][4][0]
          - tmp1 * njac[i+1][4][0];
        lhs[i][CC][0][1] = tmp2 * fjac[i+1][0][1]
          - tmp1 * njac[i+1][0][1];
        lhs[i][CC][1][1] = tmp2 * fjac[i+1][1][1]
          - tmp1 * njac[i+1][1][1]
          - tmp1 * dx2;
        lhs[i][CC][2][1] = tmp2 * fjac[i+1][2][1]
          - tmp1 * njac[i+1][2][1];
        lhs[i][CC][3][1] = tmp2 * fjac[i+1][3][1]
          - tmp1 * njac[i+1][3][1];
        lhs[i][CC][4][1] = tmp2 * fjac[i+1][4][1]
          - tmp1 * njac[i+1][4][1];
        lhs[i][CC][0][2] = tmp2 * fjac[i+1][0][2]
          - tmp1 * njac[i+1][0][2];
        lhs[i][CC][1][2] = tmp2 * fjac[i+1][1][2]
          - tmp1 * njac[i+1][1][2];
        lhs[i][CC][2][2] = tmp2 * fjac[i+1][2][2]
          - tmp1 * njac[i+1][2][2]
          - tmp1 * dx3;
        lhs[i][CC][3][2] = tmp2 * fjac[i+1][3][2]
          - tmp1 * njac[i+1][3][2];
        lhs[i][CC][4][2] = tmp2 * fjac[i+1][4][2]
          - tmp1 * njac[i+1][4][2];
        lhs[i][CC][0][3] = tmp2 * fjac[i+1][0][3]
          - tmp1 * njac[i+1][0][3];
        lhs[i][CC][1][3] = tmp2 * fjac[i+1][1][3]
          - tmp1 * njac[i+1][1][3];
        lhs[i][CC][2][3] = tmp2 * fjac[i+1][2][3]
          - tmp1 * njac[i+1][2][3];
        lhs[i][CC][3][3] = tmp2 * fjac[i+1][3][3]
          - tmp1 * njac[i+1][3][3]
          - tmp1 * dx4;
        lhs[i][CC][4][3] = tmp2 * fjac[i+1][4][3]
          - tmp1 * njac[i+1][4][3];
        lhs[i][CC][0][4] = tmp2 * fjac[i+1][0][4]
          - tmp1 * njac[i+1][0][4];
        lhs[i][CC][1][4] = tmp2 * fjac[i+1][1][4]
          - tmp1 * njac[i+1][1][4];
        lhs[i][CC][2][4] = tmp2 * fjac[i+1][2][4]
          - tmp1 * njac[i+1][2][4];
        lhs[i][CC][3][4] = tmp2 * fjac[i+1][3][4]
          - tmp1 * njac[i+1][3][4];
        lhs[i][CC][4][4] = tmp2 * fjac[i+1][4][4]
          - tmp1 * njac[i+1][4][4]
          - tmp1 * dx5;
      }
      //---------------------------------------------------------------------
      //---------------------------------------------------------------------
      //---------------------------------------------------------------------
      // performs gaussian elimination on this cell.
      //
      // assumes that unpacking routines for non-first cells
      // preload C' and rhs' from previous cell.
      //
      // assumed send happens outside this routine, but that
      // c'(IMAX) and rhs'(IMAX) will be sent to next cell
      //---------------------------------------------------------------------
      //---------------------------------------------------------------------
      // outer most do loops - sweeping in i direction
      //---------------------------------------------------------------------
      //---------------------------------------------------------------------
      // multiply c[k][j][0] by b_inverse and copy back to c
      // multiply rhs(0) by b_inverse(0) and copy to rhs
      //---------------------------------------------------------------------
      binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] );
      //---------------------------------------------------------------------
      // begin inner most do loop
      // do all the elements of the cell unless last
      //---------------------------------------------------------------------
      for (i = 1; i <= isize-1; i++) {
        //-------------------------------------------------------------------
        // rhs(i) = rhs(i) - A*rhs(i-1)
        //-------------------------------------------------------------------
        matvec_sub(lhs[i][AA], rhs[k][j][i-1], rhs[k][j][i]);
        //-------------------------------------------------------------------
        // B(i) = B(i) - C(i-1)*A(i)
        //-------------------------------------------------------------------
        matmul_sub(lhs[i][AA], lhs[i-1][CC], lhs[i][BB]);
        //-------------------------------------------------------------------
        // multiply c[k][j][i] by b_inverse and copy back to c
        // multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs
        //-------------------------------------------------------------------
        binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] );
      }
      //---------------------------------------------------------------------
      // rhs(isize) = rhs(isize) - A*rhs(isize-1)
      //---------------------------------------------------------------------
      matvec_sub(lhs[isize][AA], rhs[k][j][isize-1], rhs[k][j][isize]);
      //---------------------------------------------------------------------
      // B(isize) = B(isize) - C(isize-1)*A(isize)
      //---------------------------------------------------------------------
      matmul_sub(lhs[isize][AA], lhs[isize-1][CC], lhs[isize][BB]);
      //---------------------------------------------------------------------
      // multiply rhs() by b_inverse() and copy to rhs
      //---------------------------------------------------------------------
      binvrhs( lhs[isize][BB], rhs[k][j][isize] );
      //---------------------------------------------------------------------
      // back solve: if last cell, then generate U(isize)=rhs(isize)
      // else assume U(isize) is loaded in un pack backsub_info
      // so just use it
      // after u(istart) will be sent to next cell
      //---------------------------------------------------------------------
      for (i = isize-1; i >=0; i--) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[k][j][i][m] = rhs[k][j][i][m]
              - lhs[i][CC][n][m]*rhs[k][j][i+1][n];
          }
        }
      }
    }
  }
  #pragma endscop
  if (timeron) timer_stop(t_xsolve);
}
|
hw_1.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>
#define PI 3.141592653589793
#define FLOAT double
#define BETA 1.0
void initialize(FLOAT *x, FLOAT *v, int N);
FLOAT F(FLOAT x, FLOAT x_before, FLOAT x_after);
FLOAT energy(FLOAT *x, FLOAT *v, int m, int k);
/*
 * Velocity-Verlet integration of an FPUT (beta) oscillator chain.
 * usage: a.out <num_threads>
 * Prints the energy of modes 1..3 roughly 1000 times over the run.
 *
 * Fixes over the original: argv[1] was read without checking argc,
 * malloc results were unchecked, x/v were never freed, and the print
 * interval N_T/1000 could be zero (modulo-by-zero) for short runs.
 */
int main(int argc, char **argv){
  FLOAT delta_t = 5E-3;              /* integration time step */
  int N = 64;                        /* number of chain sites */
  FLOAT T = 5.0 * pow(N,2.2);        /* total simulated time */
  int N_T = T/delta_t;               /* number of time steps */
  FLOAT E_k_1 = 0.0;
  FLOAT E_k_2 = 0.0;
  FLOAT E_k_3 = 0.0;
  int t;
  int n;
  int n_proc;
  FLOAT F_x;

  /* Require the thread count on the command line. */
  if (argc < 2) {
    fprintf(stderr, "usage: %s <num_threads>\n", argv[0]);
    return 1;
  }
  n_proc = atoi(argv[1]);
  if (n_proc < 1) n_proc = 1;        /* guard against bad input */

  FLOAT* x = malloc(N * sizeof(FLOAT));
  FLOAT* v = malloc(N * sizeof(FLOAT));
  if (x == NULL || v == NULL) {
    fprintf(stderr, "out of memory\n");
    free(x);
    free(v);
    return 1;
  }

  /* Print every N_T/1000 steps; never let the interval reach zero. */
  int print_interval = N_T / 1000;
  if (print_interval == 0) print_interval = 1;

  initialize(x, v, N);
  omp_set_num_threads(n_proc);

  /* Velocity Verlet: half kick, drift, half kick.
   * Endpoints (n = 0, N-1) are held fixed, so loops run 1..N-2. */
  for(t = 0; t < N_T; t++){
#pragma omp parallel for shared(v,x) private(F_x)
    for(n = 1; n < N-1; n++){
      F_x = F(x[n], x[n-1], x[n+1]);
      v[n] += F_x * delta_t * 0.5;
    }
#pragma omp parallel for shared(v,x)
    for(n = 1; n < N-1; n++){
      x[n] += v[n] * delta_t;
    }
#pragma omp parallel for shared(v,x) private(F_x)
    for(n = 1; n < N-1; n++){
      F_x = F(x[n], x[n-1], x[n+1]);
      v[n] += F_x * delta_t * 0.5;
    }
    if(!(t % print_interval)){
      E_k_1 = energy(x, v, N, 1);
      E_k_2 = energy(x, v, N, 2);
      E_k_3 = energy(x, v, N, 3);
      printf("%d %e %e %e\n", t, E_k_1, E_k_2, E_k_3);
    }
  }
  free(x);
  free(v);
  return 0;
}
void initialize(FLOAT *x, FLOAT *v, int m){
int i;
for(i=0; i<m; i++){
x[i] = sin(1.0*PI*(i)/(FLOAT)(m-1));
v[i] = 0.0;
}
}
/* FPUT force on one site: linear (harmonic) coupling plus the quadratic
 * beta nonlinearity on each neighbouring bond.  The evaluation order of
 * the original is kept so results are bit-identical.
 * (FLOAT expands to double; BETA expands to 1.0.) */
double F(double x, double x_before, double x_after){
  double force = x_after - 2.0 * x + x_before;     /* harmonic part */
  force += 1.0 * (x_after - x) * (x_after - x);    /* right bond, beta term */
  force -= 1.0 * (x - x_before) * (x - x_before);  /* left bond, beta term */
  return force;
}
FLOAT energy(FLOAT *x, FLOAT *v, int m, int k){
int i;
FLOAT A_k, A_k_dot, omega_k_2, E_k;
A_k = 0.0;
for(i=0;i<m ;i++){
A_k += sqrt(2.0/(m+1)) * x[i] * sin(1.0 * (i) * k * PI /(m));
}
A_k_dot = 0.0;
for(i=0;i<m ;i++){
A_k_dot += sqrt(2.0/(m+1)) * v[i] * sin(1.0 * (i) * k * PI /(m));
}
omega_k_2 = 4.0* pow(sin(1.0 * k * PI / (2.0*(m))), 2.0);
E_k = 0.5 * (A_k_dot * A_k_dot + omega_k_2 * A_k * A_k);
return E_k;
}
|
relic_core.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2017 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* RELIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with RELIC. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file
*
* Implementation of the library basic functions.
*
* @ingroup relic
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "relic_core.h"
#include "relic_types.h"
#include "relic_err.h"
#include "relic_arch.h"
/*============================================================================*/
/* Public definitions */
/*============================================================================*/
/**
* If multi-threading is enabled, assigns each thread a local copy of the data.
*/
#if MULTI == PTHREAD
#define thread __thread
#else
#define thread /* */
#endif
/**
* Default library context.
*/
thread ctx_t first_ctx;
/**
* Active library context.
*/
thread ctx_t *core_ctx = NULL;
#if MULTI == OPENMP
#pragma omp threadprivate(first_ctx, core_ctx)
#endif
/**
 * Initializes the library context for the calling thread.  Binds the
 * default (per-thread) context on first use, then resets its error
 * tables and status fields.  Returns STS_OK.
 */
int core_init(void) {
	/* Lazily bind the per-thread default context. */
	if (core_ctx == NULL) {
		core_ctx = &(first_ctx);
	}
#if defined(CHECK) && defined(TRACE)
	/* Reset call-trace depth used by the error-tracing machinery. */
	core_ctx->trace = 0;
#endif
#ifdef CHECK
	/* Install the human-readable message for each error code. */
	core_ctx->reason[ERR_NO_MEMORY] = MSG_NO_MEMORY;
	core_ctx->reason[ERR_NO_PRECI] = MSG_NO_PRECI;
	core_ctx->reason[ERR_NO_FILE] = MSG_NO_FILE;
	core_ctx->reason[ERR_NO_READ] = MSG_NO_READ;
	core_ctx->reason[ERR_NO_VALID] = MSG_NO_VALID;
	core_ctx->reason[ERR_NO_BUFFER] = MSG_NO_BUFFER;
	core_ctx->reason[ERR_NO_FIELD] = MSG_NO_FIELD;
	core_ctx->reason[ERR_NO_CURVE] = MSG_NO_CURVE;
	core_ctx->reason[ERR_NO_CONFIG] = MSG_NO_CONFIG;
	core_ctx->last = NULL;
#endif /* CHECK */
#ifdef OVERH
	/* Clear the accumulated benchmarking-overhead counter. */
	core_ctx->over = 0;
#endif
	core_ctx->code = STS_OK;
	return STS_OK;
}
/**
 * Finalizes the library for the calling thread by detaching the active
 * context; a later core_init() rebinds it.  Returns STS_OK.
 */
int core_clean(void) {
	core_ctx = NULL;
	return STS_OK;
}
/**
 * Returns the library context active in the calling thread, or NULL if
 * core_init() has not been called (or core_clean() was the last call).
 */
ctx_t *core_get(void) {
	return core_ctx;
}
/**
 * Makes the given context the active one for the calling thread.  The
 * caller retains ownership of the context's storage.
 */
void core_set(ctx_t *ctx) {
	core_ctx = ctx;
}
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "npb-C.h"
#include "npbparams.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
#ifdef _OPENARC_
#pragma openarc #define NZ \NA*(\NONZER+1)*(\NONZER+1)+\NA*(\NONZER+2)
#endif
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static float v[NA+1+1]; /* v[1:NA+1] */
static float aelt[NZ+1]; /* aelt[1:NZ] */
static float a[NZ+1]; /* a[1:NZ] */
static float x[NA+2+1]; /* x[1:NA+2] */
static float z[NA+2+1]; /* z[1:NA+2] */
static float p[NA+2+1]; /* p[1:NA+2] */
static float q[NA+2+1]; /* q[1:NA+2] */
static float r[NA+2+1]; /* r[1:NA+2] */
static float w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static float amult;
static float tran;
// Static variables used in conj_grad().
static float d, sum, rho, rho0, alpha, beta;
/* function declarations */
static void conj_grad (int colidx[NZ+1], int rowstr[NA+1+1], float x[NA+2+1], float z[NA+2+1],
float a[NZ+1], float p[NA+2+1], float q[NA+2+1], float r[NA+2+1],
float w[NA+2+1], float *rnorm);
static void makea(int n, int nz, float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, float rcond, int arow[NZ+1], int acol[NZ+1],
float aelt[NZ+1], float v[NA+1+1], int iv[2*NA+1+1], float shift );
static void sparse(float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1], int n,
int arow[NZ+1], int acol[NZ+1], float aelt[NZ+1],
int firstrow, int lastrow,
float x[NA+1+1], boolean mark[NA+1], int nzloc[NA+1], int nnza);
static void sprnvc(int n, int nz, float v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(float x, int ipwr2);
static void vecset(int n, float v[], int iv[], int *nzv, int i, float val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
int main(int argc, char **argv) {
int i_main, j_main, k_main, it;
int nthreads = 1;
float zeta;
float rnorm;
float norm_temp11;
float norm_temp12;
float t, mflops;
char classT = 'U';
boolean verified;
float zeta_verify_value, epsilon;
////////////////////////////////////
// Used for inlining conj_grad(). //
////////////////////////////////////
int i, j, k;
int cgit, cgitmax = 25;
firstrow = 1;
lastrow = NA;
firstcol = 1;
lastcol = NA;
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
classT = 'S';
// zeta_verify_value = 8.5971775078648;
zeta_verify_value = 8.379274368286; //serial version value with Single Precision
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
classT = 'W';
// zeta_verify_value = 10.362595087124;
zeta_verify_value = 10.11725139618; //serial version value with Single Precision
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
classT = 'A';
// zeta_verify_value = 17.130235054029;
zeta_verify_value = 18.62915039062; //serial version value with Single Precision
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
classT = 'B';
// zeta_verify_value = 22.712745482631;
zeta_verify_value = 62.42129135132; //serial version value with Single Precision
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
classT = 'C';
// zeta_verify_value = 28.973605592845;
zeta_verify_value = 115.1209869385; //serial version value with Single Precision
} else {
classT = 'U';
}
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - CG Benchmark\n");
printf(" Size: %10d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
timer_clear(2);
timer_clear(3);
timer_clear(4);
timer_start(2);
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
// Initial numbers are changed for single precision
// tran = 314159265.0;
// amult = 1220703125.0;
tran = 28183.0f;
amult = 390625.0f;
zeta = randlc( &tran, amult );
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
timer_start(4);
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
timer_stop(4);
timer_start(3);
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma acc data \
create(x[0:NA+3]) \
create(z[0:NA+3]) \
create(p[0:NA+3]) \
create(q[0:NA+3]) \
create(r[0:NA+3]) \
create(w[0:NA+3]) \
copyin(a[0:NZ+1]) \
copyin(colidx[0:NZ+1]) \
copyin(rowstr[0:NA+2])
{
timer_stop(3);
// R/O Shared scalar: lastrow, firstrow, firstcol
// R/O Shared arrays: rowstr[NA+1+1]
// R/W Shared arrays: colidx[NZ+1]
// R/W Private scalar: j_main, k_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastrow - firstrow + 1; j_main++) {
for (k_main = rowstr[j_main]; k_main < rowstr[j_main+1]; k_main++) {
colidx[k_main] = colidx[k_main] - firstcol + 1;
}
}
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalar: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalar: zeta
zeta = 0.0f;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
for (it = 1; it <= 1; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/w Shared scalars: norm_temp12
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalars: zeta
zeta = 0.0f;
// } /* end parallel */
timer_clear( 1 );
timer_start( 1 );
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
//#pragma omp parallel private(it,i_main,j_main,k_main)
// {
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/O Shared scalars: norm_temp11
// R/W Shared scalars: norm_temp12, zeta
{
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
zeta = SHIFT + 1.0f / norm_temp11;
} /* end single */
{
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
} /* end master */
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of main iter inv pow meth */
#if defined(_OPENMP)
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop( 1 );
timer_stop( 2 );
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
printf(" Benchmark completed\n");
//epsilon = 1.0e-10;
//New value for single precision
epsilon = 1.0e-6;
if (classT != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.12e\n", zeta);
printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.12e\n", zeta);
printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", classT, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
printf("makea() execution time = %12.4f\n", timer_read(4));
printf("CUDA Initialization time = %12.4f\n", timer_read(3));
printf("Total execution time = %12.4f\n", timer_read(2));
return 0;
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
/*
 * Build the CG test matrix as a list of [row, col, value] triples.
 *
 * Each of the n outer iterations draws a sparse random n-vector with
 * sprnvc(), forces element `iouter` to 0.5 with vecset(), then adds the
 * outer product v*v' (scaled by a geometrically decaying `size`) into
 * the triple arrays, restricted to the rows/columns this partition
 * owns.  rcond - shift is then added on the diagonal, and sparse()
 * assembles the triples into CSR arrays (a, colidx, rowstr).
 */
static void makea(
    int n,               /* number of cols/rows of matrix */
    int nz,              /* declared capacity of the nonzero arrays */
    float a[NZ+1],       /* a[1:nz]       out: CSR nonzero values */
    int colidx[NZ+1],    /* colidx[1:nz]  out: CSR column indices */
    int rowstr[NA+1+1],  /* rowstr[1:n+1] out: CSR row pointers */
    int nonzer,          /* target nonzeros per generated vector */
    int firstrow,
    int lastrow,         /* rows owned by this partition */
    int firstcol,
    int lastcol,         /* columns owned by this partition */
    float rcond,         /* condition number */
    int arow[NZ+1],      /* arow[1:nz]  workspace: triple row indices */
    int acol[NZ+1],      /* acol[1:nz]  workspace: triple col indices */
    float aelt[NZ+1],    /* aelt[1:nz]  workspace: triple values */
    float v[NA+1+1],     /* v[1:n+1]    workspace for sprnvc/sparse */
    int iv[2*NA+1+1],    /* iv[1:2*n+1] workspace for sprnvc/sparse */
    float shift )        /* main diagonal shift */
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
    /*--------------------------------------------------------------------
    c  nonzer is approximately (int(sqrt(nnza /n)));
    c-------------------------------------------------------------------*/
    float size, ratio, scale;
    int jcol;
    size = 1.0f;
    /* scale factor per outer iteration so the outer products decay from
     * 1 down to ~rcond, bounding the spectrum of the result */
    ratio = pow(rcond, (1.0f / (float)n));
    nnza = 0;           /* running count of triples generated */
    /*---------------------------------------------------------------------
    c  Initialize colidx(n+1 .. 2n) to zero.
    c  Used by sprnvc to mark nonzero positions
    c---------------------------------------------------------------------*/
    // R/O Shared scalars: n
    // R/W Shared arrays: colidx[NZ+1]
    // R/W Private scalars: i
#pragma acc kernels loop gang worker pcopyout(colidx)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        /* colidx[n+1..2n] serves as sprnvc's "already used" marker array */
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        /* make sure position iouter is present so the diagonal is hit */
        vecset(n, v, iv, &nzv, iouter, 0.5);
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                /* add the jcol column of the outer product v*v' */
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            printf("Space for matrix elements exceeded in"
                                   " makea\n");
                            printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }
    /*---------------------------------------------------------------------
    c       ... add the identity * rcond to the generated matrix to bound
    c           the smallest eigenvalue from below by rcond
    c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                printf("Space for matrix elements exceeded in makea\n");
                printf("nnza, nzmax = %d, %d\n", nnza, nz);
                printf("iouter = %d\n", iouter);
                exit(1);
            }
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;
        }
    }
    /*---------------------------------------------------------------------
    c       ... make the sparse matrix from list of elements with duplicates
    c           (v and iv are used as workspace)
    c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
/*---------------------------------------------------
c       generate a sparse matrix from a list of
c       [col, row, element] tri
c---------------------------------------------------*/
/*
 * Assemble CSR arrays (a, colidx, rowstr) from nnza possibly-duplicated
 * triples (arow, acol, aelt).  Triples are bucket-sorted by row, then
 * duplicates within each row are summed via the dense scratch vector x,
 * with mark/nzloc recording which positions of x are dirty so only
 * those are scanned and reset.
 *
 * NOTE(review): rowstr is deliberately clobbered during the bucket sort
 * and repaired afterwards — the statement order here is load-bearing.
 */
static void sparse(
    float a[NZ+1],       /* a[1:*]       out: CSR values */
    int colidx[NZ+1],    /* colidx[1:*]  out: CSR column indices */
    int rowstr[NA+1+1],  /* rowstr[1:*]  out: CSR row pointers */
    int n,
    int arow[NZ+1],      /* arow[1:*]  in: triple row indices */
    int acol[NZ+1],      /* acol[1:*]  in: triple col indices */
    float aelt[NZ+1],    /* aelt[1:*]  in: triple values */
    int firstrow,
    int lastrow,
    float x[NA+1+1],     /* x[1:n]     workspace: dense row accumulator */
    boolean mark[NA+1],  /* mark[1:n]  workspace: "x[i] is dirty" flags */
    int nzloc[NA+1],     /* nzloc[1:n] workspace: dirty positions of x */
    int nnza)            /* number of input triples */
/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    float xi;
    /*--------------------------------------------------------------------
    c    how many rows of result
    c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;
    /*--------------------------------------------------------------------
    c     ...count the number of triples in each row
    c-------------------------------------------------------------------*/
    // R/O Shared scalars: n
    // R/W Shared arrays: rowstr[NA+1+1], mark[n]
    // R/W Private scalars: j
#pragma acc kernels loop gang worker independent \
    pcopyout(rowstr[0:NA+1+1]) create(mark[0:NA+1])
    for (j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;
    /* histogram: rowstr[j+1] counts triples whose row is j */
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }
    /* prefix sum turns the counts into row start offsets (1-based) */
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }
    /*---------------------------------------------------------------------
    c     ... rowstr(j) now is the location of the first nonzero
    c           of row j of a
    c---------------------------------------------------------------------*/
    /*--------------------------------------------------------------------
    c     ... do a bucket sort of the triples on the row index
    c-------------------------------------------------------------------*/
    /* each insertion advances rowstr[j], so afterwards rowstr[j] points
     * one past the end of row j (i.e. at the start of row j+1) */
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;
    }
    /*--------------------------------------------------------------------
    c       ... rowstr(j) now points to the first element of row j+1
    c-------------------------------------------------------------------*/
    /* shift right by one to restore rowstr[j] = start of row j */
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;
    /*--------------------------------------------------------------------
    c       ... generate the actual output rows by adding elements
    c-------------------------------------------------------------------*/
    nza = 0;            /* output nonzero count after duplicate merging */
    // R/O Shared scalars: n
    // R/W Shared arrays: x[NA+2+1], mark[n]
    // R/W Private scalars: i
#pragma acc kernels loop gang worker pcopyout(x, mark)
    for (i = 1; i <= n; i++) {
        x[i] = 0.0f;
        mark[i] = FALSE;
    }
    jajp1 = rowstr[1];  /* start of the current (unmerged) row j */
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;      /* distinct dirty positions touched in this row */
        /*--------------------------------------------------------------------
        c          ...loop over the jth row of a
        c-------------------------------------------------------------------*/
        /* accumulate duplicates of row j into the dense vector x */
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0f) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;
            }
        }
        /*--------------------------------------------------------------------
        c          ... extract the nonzeros of this row
        c-------------------------------------------------------------------*/
        /* compact the dirty positions back into (a, colidx), skipping
         * entries that cancelled to exactly zero, and reset x/mark */
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0f;
            if (xi != 0.0f) {
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        /* remember the old start of row j+1 before overwriting it with
         * the merged row's end offset */
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];
    }
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
/*
 * Generate a sparse n-vector (v, iv) holding nz nonzeros at distinct
 * pseudo-random positions in [1, n].
 *
 * mark[i] is set to 1 while position i is occupied; it must be all zero
 * on entry and is restored to all zero before returning, so repeated
 * calls do not pay for a full O(n) reinitialization.
 */
static void sprnvc(
    int n,
    int nz,
    float v[],      /* v[1:*]     out: nonzero values */
    int iv[],       /* iv[1:*]    out: their positions */
    int nzloc[],    /* nzloc[1:n] scratch: positions to reset in mark */
    int mark[] )    /* mark[1:n]  scratch: occupancy flags */
{
    int pow2;           /* smallest power of two >= max(n, 2) */
    int used = 0;       /* entries recorded in nzloc */
    int count = 0;      /* nonzeros generated so far */
    int idx, k;
    float value, position;

    pow2 = 2;
    while (pow2 < n)
        pow2 *= 2;
    /*--------------------------------------------------------------------
    c    pow2 is the smallest power of two not less than n
    c-------------------------------------------------------------------*/
    while (count < nz) {
        /* The value must be drawn from the global stream `tran` before
         * the position, to match the reference generation order. */
        value = randlc(&tran, amult);
        /*--------------------------------------------------------------------
        c   generate an integer between 1 and n in a portable manner
        c-------------------------------------------------------------------*/
        position = randlc(&tran, amult);
        idx = icnvrt(position, pow2) + 1;
        if (idx > n)
            continue;               /* out of range: draw again */
        /*--------------------------------------------------------------------
        c  was this integer generated already?
        c-------------------------------------------------------------------*/
        if (mark[idx] != 0)
            continue;               /* position already occupied */
        mark[idx] = 1;
        used = used + 1;
        nzloc[used] = idx;
        count = count + 1;
        v[count] = value;
        iv[count] = idx;
    }
    /* Reset only the positions we touched, leaving mark all zero. */
    for (k = 1; k <= used; k++)
        mark[nzloc[k]] = 0;
}
/*---------------------------------------------------------------------
* scale a single-precision float x in (0,1) by a power of 2 and chop
* it to an integer
*---------------------------------------------------------------------*/
static int icnvrt(float x, int ipwr2) {
    /* Scale x by the power of two and truncate toward zero. */
    int scaled = (int)(x * (float)ipwr2);
    return scaled;
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c       set ith element of sparse vector (v, iv) with
c       nzv nonzeros to val
c-------------------------------------------------------------------*/
/*
 * If index i already appears in (v, iv), overwrite its value with val;
 * otherwise append it as a new nonzero and increment *nzv.
 */
static void vecset(
    int n,
    float v[],      /* v[1:*]  nonzero values */
    int iv[],       /* iv[1:*] their positions */
    int *nzv,       /* in/out: number of nonzeros */
    int i,          /* position to set */
    float val)      /* value to store */
{
    int k;
    int present = 0;    /* did we find index i among the nonzeros? */
    for (k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            present = 1;
        }
    }
    if (!present) {
        *nzv = *nzv + 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
sapG_fmt_plug.c | /*
* this is a SAP PASSCODE (CODEVN G) plugin for john the ripper.
* tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-)
*
* please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it...
*
* (c) x7d8 sap loverz, public domain, btw
* cheers: see test-cases.
*
* Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and
* encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapG;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapG);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"
#define FORMAT_LABEL "sapg"
#define FORMAT_NAME "SAP CODVN F/G (PASSCODE)"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
static unsigned int omp_t = 1;
#if defined(_OPENMP)
#include <omp.h>
#ifndef OMP_SCALE
#if defined (SIMD_COEF_32)
// some OMP scaling moved into max_keys, so that we can have more values in SIMD
// mode, to sort hashes by limb size. (TODO)
#define OMP_SCALE 128
#else
#define OMP_SCALE 2048
#endif
#endif
#endif
#include "memdbg.h"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define SALT_FIELD_LENGTH 40
#define USER_NAME_LENGTH 12 /* max. length of user name in characters */
#define SALT_LENGTH (USER_NAME_LENGTH * 4) /* bytes of UTF-8 */
#define PLAINTEXT_LENGTH 40 /* Characters */
#define UTF8_PLAINTEXT_LENGTH MIN(125, PLAINTEXT_LENGTH * 3) /* bytes */
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + 2*BINARY_SIZE) /* SALT + $ + 2x20 bytes for SHA1-representation */
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
// max keys increased to allow sorting based on limb counts
#define MAX_KEYS_PER_CRYPT NBKEYS*64
#define GETWORDPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETSTARTPOS(index) ( (index&(SIMD_COEF_32-1))*4 + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETOUTSTARTPOS(index) ( (index&(SIMD_COEF_32-1))*4 + (unsigned int)index/SIMD_COEF_32*20*SIMD_COEF_32 )
#if ARCH_LITTLE_ENDIAN
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) //for endianity conversion
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) //for endianity conversion
#endif
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
//this array is from disp+work (sap's worker process)
#define MAGIC_ARRAY_SIZE 160
static const unsigned char theMagicArray[MAGIC_ARRAY_SIZE]=
{0x91, 0xAC, 0x51, 0x14, 0x9F, 0x67, 0x54, 0x43, 0x24, 0xE7, 0x3B, 0xE0, 0x28, 0x74, 0x7B, 0xC2,
0x86, 0x33, 0x13, 0xEB, 0x5A, 0x4F, 0xCB, 0x5C, 0x08, 0x0A, 0x73, 0x37, 0x0E, 0x5D, 0x1C, 0x2F,
0x33, 0x8F, 0xE6, 0xE5, 0xF8, 0x9B, 0xAE, 0xDD, 0x16, 0xF2, 0x4B, 0x8D, 0x2C, 0xE1, 0xD4, 0xDC,
0xB0, 0xCB, 0xDF, 0x9D, 0xD4, 0x70, 0x6D, 0x17, 0xF9, 0x4D, 0x42, 0x3F, 0x9B, 0x1B, 0x11, 0x94,
0x9F, 0x5B, 0xC1, 0x9B, 0x06, 0x05, 0x9D, 0x03, 0x9D, 0x5E, 0x13, 0x8A, 0x1E, 0x9A, 0x6A, 0xE8,
0xD9, 0x7C, 0x14, 0x17, 0x58, 0xC7, 0x2A, 0xF6, 0xA1, 0x99, 0x63, 0x0A, 0xD7, 0xFD, 0x70, 0xC3,
0xF6, 0x5E, 0x74, 0x13, 0x03, 0xC9, 0x0B, 0x04, 0x26, 0x98, 0xF7, 0x26, 0x8A, 0x92, 0x93, 0x25,
0xB0, 0xA2, 0x0D, 0x23, 0xED, 0x63, 0x79, 0x6D, 0x13, 0x32, 0xFA, 0x3C, 0x35, 0x02, 0x9A, 0xA3,
0xB3, 0xDD, 0x8E, 0x0A, 0x24, 0xBF, 0x51, 0xC3, 0x7C, 0xCD, 0x55, 0x9F, 0x37, 0xAF, 0x94, 0x4C,
0x29, 0x08, 0x52, 0x82, 0xB2, 0x3B, 0x4E, 0x37, 0x9F, 0x17, 0x07, 0x91, 0x11, 0x3B, 0xFD, 0xCD };
// For backwards compatibility, we must support salts padded with spaces to a field width of 40
static struct fmt_tests tests[] = {
{"DDIC$6066CD3147915331EC4C602847D27A75EB3E8F0A", "DDIC"},
/*
* invalid IRL because password is too short (would work during login,
* but not during password change). We use these tests anyway because
* they help verifying key buffer cleaning:
*/
{"F $646A0AD270DF651065669A45D171EDD62DFE39A1", "X"},
{"JOHNNY $7D79B478E70CAAE63C41E0824EAB644B9070D10A", "CYBERPUNK"},
{"VAN$D15597367F24090F0A501962788E9F19B3604E73", "hauser"},
{"ROOT$1194E38F14B9F3F8DA1B181F14DEB70E7BDCC239", "KID"},
// invalid, because password is too short (would work during login, but not during password change):
{"MAN$22886450D0AB90FDA7F91C4F3DD5619175B372EA", "u"},
// SAP user name consisting of 12 consecutive EURO characters:
{"\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac"
"\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac"
"$B20D15C088481780CD44FCF2003AAAFBD9710C7C", "--+----"},
{"SAP* $60A0F7E06D95BC9FB45F605BDF1F7B660E5D5D4E", "MaStEr"},
{"DOLLAR$$$---$E0180FD4542D8B6715E7D0D9EDE7E2D2E40C3D4D", "Dollar$$$---"},
{NULL}
};
static UTF8 (*saved_plain)[UTF8_PLAINTEXT_LENGTH + 1];
static int *keyLen;
static int max_keys;
#ifdef SIMD_COEF_32
// max intermediate crypt size is 256 bytes
// multiple key buffers for lengths > 55
#define LIMB 5
static unsigned char *saved_key[LIMB];
static unsigned char *crypt_key;
static unsigned char *interm_crypt;
static unsigned int *clean_pos;
#else
static UTF8 (*saved_key)[UTF8_PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)];
#endif
static struct saltstruct {
unsigned int l;
unsigned char s[SALT_LENGTH];
} *cur_salt;
/*
 * One-time format initialization: set up Unicode handling, scale the
 * key-batch sizes by the OpenMP thread count, and allocate all
 * per-candidate buffers.  Everything allocated here is released by
 * done().
 */
static void init(struct fmt_main *self)
{
    static int warned = 0;  /* emit the non-UTF-8 warning only once */
#ifdef SIMD_COEF_32
    int i;
#endif
    // This is needed in order NOT to upper-case german double-s
    // in UTF-8 mode.
    initUnicode(UNICODE_MS_NEW);
    if (!options.listconf && options.target_enc != UTF_8 &&
        !(options.flags & FLG_TEST_CHK) && warned++ == 0)
        fprintf(stderr, "Warning: SAP-F/G format should always be UTF-8.\n"
                "Use --target-encoding=utf8\n");
    // Max 40 characters or 125 bytes of UTF-8. We actually do not truncate
    // multibyte input at 40 characters later because it's too expensive.
    if (options.target_enc == UTF_8)
        self->params.plaintext_length = UTF8_PLAINTEXT_LENGTH;
#if defined (_OPENMP)
    // Scale the crypt batch by thread count, then by OMP_SCALE for the
    // max batch so each thread gets a larger work chunk.
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    max_keys = self->params.max_keys_per_crypt;
    saved_plain = mem_calloc(self->params.max_keys_per_crypt,
                             sizeof(*saved_plain));
    keyLen = mem_calloc(self->params.max_keys_per_crypt, sizeof(*keyLen));
#ifdef SIMD_COEF_32
    clean_pos = mem_calloc(self->params.max_keys_per_crypt,
                           sizeof(*clean_pos));
    // One SIMD-aligned key buffer per 64-byte SHA-1 limb, since keys
    // longer than one block span multiple limbs (see LIMB above).
    for (i = 0; i < LIMB; i++)
        saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt,
                                        SHA_BUF_SIZ * 4,
                                        MEM_ALIGN_SIMD);
    interm_crypt = mem_calloc_align(self->params.max_keys_per_crypt,
                                    20, MEM_ALIGN_SIMD);
    crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
                                 20, MEM_ALIGN_SIMD);
#else
    // Scalar build: crypt directly from the plaintext buffer.
    crypt_key = mem_calloc(self->params.max_keys_per_crypt,
                           sizeof(*crypt_key));
    saved_key = saved_plain;
#endif
}
/* Release every buffer allocated in init(). MEM_FREE() is a macro that
 * also NULLs the pointer, so the order of the calls is immaterial; we
 * free roughly in reverse order of allocation for readability. */
static void done(void)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#ifdef SIMD_COEF_32
	MEM_FREE(crypt_key);
	MEM_FREE(interm_crypt);
	for (i = LIMB - 1; i >= 0; i--)
		MEM_FREE(saved_key[i]);
	MEM_FREE(clean_pos);
#else
	/* saved_key aliases saved_plain here; only free the latter */
	MEM_FREE(crypt_key);
#endif
	MEM_FREE(keyLen);
	MEM_FREE(saved_plain);
}
/*
 * Syntactic validation of one ciphertext line: "<USERNAME>$<40 hex digits>".
 * Returns 1 for a well-formed hash, 0 otherwise. All the individual checks
 * are independent rejections, so their outcome does not depend on order.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int i;
	int wide = 0;	/* bytes seen with the high bit set (multi-byte chars) */
	char *hash;

	if (!ciphertext)
		return 0;

	hash = strrchr(ciphertext, '$');
	if (!hash)
		return 0;

	/* Salt (user name) field length and digest length must fit */
	if (hash - ciphertext > SALT_FIELD_LENGTH)
		return 0;
	if (strlen(hash + 1) != BINARY_SIZE * 2)
		return 0;

	for (i = 0; i < hash - ciphertext; i++) {
		/* even those lower case non-ascii characters with a
		   corresponding upper case character could be rejected */
		if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z')
			return 0;
		else if (ciphertext[i] & 0x80)
			wide++;
		/* Reject if user name is longer than 12 characters.
		   This is not accurate, but close enough: counting every
		   byte >= 0x80 over-estimates the allowance slightly. */
		if (i >= USER_NAME_LENGTH + wide && ciphertext[i] != ' ')
			return 0;
	}

	/* SAP user name cannot start with ! or ? */
	if (ciphertext[0] == '!' || ciphertext[0] == '?')
		return 0;

	/* the user name must not simply be spaces, or empty */
	i = 0;
	while (i < hash - ciphertext && ciphertext[i] == ' ')
		i++;
	if (ciphertext[i] == '$')
		return 0;

	/* SAP and sap2john.pl always use upper case A-F for hashes,
	   so don't allow a-f */
	for (i = 1; i <= BINARY_SIZE * 2; i++) {
		char c = hash[i];

		if (!((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')))
			return 0;
	}
	return 1;
}
// Install the current salt (a struct saltstruct produced by get_salt())
// for use by subsequent crypt_all() calls.
static void set_salt(void *salt)
{
cur_salt = salt;
}
/* Extract the salt (everything before the final '$', i.e. the SAP user
 * name) into a static saltstruct and return it. */
static void *get_salt(char *ciphertext)
{
	static struct saltstruct out;
	size_t len = strrchr(ciphertext, '$') - ciphertext;

	memset(out.s, 0, sizeof(out.s));
	memcpy(out.s, ciphertext, len);
	out.l = (int)len;
	return &out;
}
static void clear_keys(void)
{
memset(keyLen, 0, sizeof(*keyLen) * omp_t * MAX_KEYS_PER_CRYPT);
}
/* Store one candidate password. Length calculation (and trailing-space
 * stripping) is deferred to crypt_all(), signalled by keyLen = -1. */
static void set_key(char *key, int index)
{
	keyLen[index] = -1;
	strnzcpy((char*)saved_plain[index], key, sizeof(*saved_plain));
}
/* Return the stored candidate exactly as set_key() received it */
static char *get_key(int index)
{
	char *key = (char*)saved_plain[index];

	return key;
}
// Quick scan: does ANY computed digest match the candidate binary?
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
// Compare only the first 32-bit word of each interleaved digest; a hit
// is then fully verified by cmp_one(). Index math: y advances by
// SIMD_COEF_32 per group, so x + y*5 == x + group*5*SIMD_COEF_32, the
// lane-x word 0 of that group's 5-word digests. Note this scans all
// max_keys slots, not just 'count'.
unsigned int x,y=0;
for (;y<max_keys;y+=SIMD_COEF_32)
for (x=0;x<SIMD_COEF_32;x++)
{
if ( ((unsigned int*)binary)[0] == ((unsigned int*)crypt_key)[x+y*5] )
return 1;
}
return 0;
#else
// Scalar path: full 20-byte compare per candidate
unsigned int index;
for (index = 0; index < count; index++)
if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
return 1;
return 0;
#endif
}
// Nothing left to verify: cmp_one() already compares the full digest
static int cmp_exact(char *source, int index)
{
return 1;
}
// Full comparison of one candidate's digest against the binary
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
// x = SIMD lane, y = group; each group holds 5 words per lane,
// interleaved, so word w of this digest is at [x + y*5*SIMD_COEF_32 + w*SIMD_COEF_32]
unsigned int x,y;
x = index&(SIMD_COEF_32-1);
y = (unsigned int)index/SIMD_COEF_32;
if ( (((unsigned int*)binary)[0] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5]) |
(((unsigned int*)binary)[1] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+SIMD_COEF_32]) |
(((unsigned int*)binary)[2] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+2*SIMD_COEF_32]) |
(((unsigned int*)binary)[3] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+3*SIMD_COEF_32])|
(((unsigned int*)binary)[4] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+4*SIMD_COEF_32]) )
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/*
 * calculate the length of data that has to be hashed from the magic array. pass the first hash result in here.
 * this is part of the walld0rf-magic
 * The return value will always be between 32 and 82, inclusive
 * (sum of ten values each in 0..5, plus the 0x20 base).
 */
#if SIMD_COEF_32
inline static unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray, unsigned int index)
#else
inline static unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray)
#endif
{
unsigned int modSum = 0;
#if SIMD_COEF_32
// Walk the first 10 bytes of this lane's digest. In the interleaved
// layout, consecutive 4-byte words of one lane are SIMD_COEF_32 words
// apart, hence the p += 4*(SIMD_COEF_32 - 1) skips between words.
unsigned const char *p = &pbHashArray[GETOUTSTARTPOS(index)]; // [(index/SIMD_COEF_32)*20*SIMD_COEF_32+(index%SIMD_COEF_32)*4]
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
p += 4*(SIMD_COEF_32 - 1);
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
modSum += *p++ % 6;
p += 4*(SIMD_COEF_32 - 1);
#if ARCH_LITTLE_ENDIAN
// bytes 8..9 sit in the upper half of the (byte-swapped) third word
p += 2;
#endif
modSum += *p++ % 6;
modSum += *p % 6;
#else
// Scalar layout: simply the first 10 digest bytes
unsigned int i;
for (i = 0; i < 10; i++)
modSum += pbHashArray[i] % 6;
#endif
return modSum + 0x20; //0x20 is hardcoded...
}
/*
 * Calculate the offset into the magic array. pass the first hash result in here
 * part of the walld0rf-magic
 * The return value will always be between 0 and 70, inclusive
 * (sum of ten values each in 0..7).
 */
#if SIMD_COEF_32
inline static unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray, unsigned int index)
#else
inline static unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray)
#endif
{
unsigned int modSum = 0;
#if SIMD_COEF_32
// Walk digest bytes 10..19 of this lane; the endian-dependent p += 2
// adjustments compensate for byte order within the interleaved words.
unsigned const char *p = &pbHashArray[GETOUTSTARTPOS(index)]; // [(index/SIMD_COEF_32)*20*SIMD_COEF_32+(index%SIMD_COEF_32)*4]
p += 4*(SIMD_COEF_32)*2;
#if !ARCH_LITTLE_ENDIAN
p += 2;
#endif
modSum += *p++ % 8;
modSum += *p++ % 8;
#if ARCH_LITTLE_ENDIAN
p += 2;
#endif
p += 4*(SIMD_COEF_32 - 1);
modSum += *p++ % 8;
modSum += *p++ % 8;
modSum += *p++ % 8;
modSum += *p++ % 8;
p += 4*(SIMD_COEF_32 - 1);
modSum += *p++ % 8;
modSum += *p++ % 8;
modSum += *p++ % 8;
modSum += *p % 8;
#else
// Scalar layout: digest bytes 10..19
unsigned int i;
for (i = 10; i < 20; i++)
modSum += pbHashArray[i] % 8;
#endif
return modSum;
}
#if SIMD_COEF_32
// Copy the 5-word (SHA-1) digest of one SIMD lane from 'source' to
// 'dest', both in interleaved layout: used to commit a finished
// intermediate digest into crypt_key as soon as its limb completes.
inline static void crypt_done(unsigned const int *source, unsigned int *dest, int index)
{
unsigned int i;
unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32];
unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32];
// Words of one lane are SIMD_COEF_32 ints apart
for (i = 0; i < 5; i++) {
*d = *s;
s += SIMD_COEF_32;
d += SIMD_COEF_32;
}
}
#endif
// Hash all queued candidates against cur_salt.
// Scheme: h1 = SHA1(password . username); then pick a slice of
// theMagicArray using h1 (offset/length extractors above); final
// digest = SHA1(password . magic-slice . username).
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
#if SIMD_COEF_32
// ti = absolute candidate index of lane 'index' within SIMD group 't'
#define ti (t*NBKEYS+index)
unsigned t;
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (t = 0; t < (count-1)/(NBKEYS)+1; t++)
{
unsigned int index, i, longest;
int len;
unsigned int crypt_len[NBKEYS];
longest = 0;
for (index = 0; index < NBKEYS; index++) {
// Store key into vector key buffer
if ((len = keyLen[ti]) < 0) {
uint32_t *keybuf_word = (uint32_t*)&saved_key[0][GETSTARTPOS(ti)];
#if ARCH_ALLOWS_UNALIGNED
const uint32_t *wkey = (uint32_t*)saved_plain[ti];
#else
char buf_aligned[UTF8_PLAINTEXT_LENGTH + 1] JTR_ALIGN(4);
char *key = (char*)saved_plain[ti];
const uint32_t *wkey = is_aligned(key, 4) ?
(uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key);
#endif
uint32_t temp;
len = 0;
// Copy a word at a time into the interleaved buffer, converting
// to the big-endian layout SHA-1 wants; stop at the NUL byte.
#if ARCH_LITTLE_ENDIAN
while(((unsigned char)(temp = *wkey++))) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP(temp & 0xff);
len++;
break;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP(temp & 0xffff);
len+=2;
break;
}
*keybuf_word = JOHNSWAP(temp);
if (!(temp & 0xff000000))
{
len+=3;
break;
}
#else
while((temp = *wkey++) & 0xff000000) {
if (!(temp & 0xff0000))
{
*keybuf_word = (temp & 0xff000000) | (0x80 << 16);
len++;
break;
}
if (!(temp & 0xff00))
{
*keybuf_word = (temp & 0xffff0000) | (0x80 << 8);
len+=2;
break;
}
*keybuf_word = temp;
if (!(temp & 0xff))
{
*keybuf_word = temp | 0x80U;
len+=3;
break;
}
#endif
len += 4;
// Cross into the next 64-byte limb buffer when needed
if (len & 63)
keybuf_word += SIMD_COEF_32;
else
keybuf_word = (uint32_t*)&saved_key[len>>6][GETSTARTPOS(ti)];
}
// Back-out of trailing spaces
while(len && saved_plain[ti][len - 1] == ' ')
saved_plain[ti][--len] = 0;
keyLen[ti] = len;
}
// 1. we need to SHA1 the password and username
for (i = 0; i < cur_salt->l; i++)
saved_key[(len+i)>>6][GETPOS((len + i), ti)] = cur_salt->s[i];
len += i;
// SHA-1 padding byte right after the message
saved_key[len>>6][GETPOS(len, ti)] = 0x80;
// Clean rest of this buffer
i = len;
while (++i & 3)
saved_key[i>>6][GETPOS(i, ti)] = 0;
for (; i < (((len+8)>>6)+1)*64; i += 4)
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;
// This should do good but Valgrind insists it's a waste
//if (clean_pos[ti] < i)
//	clean_pos[ti] = len + 1;
if (len > longest)
longest = len;
// Message bit length goes in the last word of the final limb
((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;
crypt_len[index] = len;
}
// First limb of h1 = SHA1(password . username)
SIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&crypt_key[t*20*NBKEYS], NULL, SSEi_MIXED_IN);
// Do another and possibly a third limb
memcpy(&interm_crypt[t*20*NBKEYS], &crypt_key[t*20*NBKEYS], 20*NBKEYS);
for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
SIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);
// Copy any output that is done now
for (index = 0; index < NBKEYS; index++)
if (((crypt_len[index] + 8) >> 6) == i)
crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
}
// Second pass: build password . magic-slice . username and hash again
longest = 0;
for (index = 0; index < NBKEYS; index++) {
unsigned int offsetMagicArray;
unsigned int lengthIntoMagicArray;
const unsigned char *p;
int i;
// If final crypt ends up to be 56-61 bytes (or so), this must be clean
for (i = 0; i < LIMB; i++)
if (keyLen[ti] < i * 64 + 55)
((unsigned int*)saved_key[i])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = 0;
len = keyLen[ti];
lengthIntoMagicArray = extractLengthOfMagicArray(crypt_key, ti);
offsetMagicArray = extractOffsetToMagicArray(crypt_key, ti);
// 2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...
i = len - 1;
p = &theMagicArray[offsetMagicArray];
// Copy a char at a time until aligned (at destination)...
while (++i & 3)
saved_key[i>>6][GETPOS(i, ti)] = *p++;
// ...then a word at a time. This is a good boost, we are copying between 32 and 82 bytes here.
#if ARCH_ALLOWS_UNALIGNED
for (;i < lengthIntoMagicArray + len; i += 4, p += 4)
#if ARCH_LITTLE_ENDIAN
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = JOHNSWAP(*(uint32_t*)p);
#else
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = *(uint32_t*)p;
#endif
#else
for (;i < lengthIntoMagicArray + len; ++i, ++p) {
saved_key[i>>6][GETPOS(i, ti)] = *p;
}
#endif
// Now, the salt. This is typically too short for the stunt above.
for (i = 0; i < cur_salt->l; i++)
saved_key[(len+lengthIntoMagicArray+i)>>6][GETPOS((len + lengthIntoMagicArray + i), ti)] = cur_salt->s[i];
len += lengthIntoMagicArray + cur_salt->l;
saved_key[len>>6][GETPOS(len, ti)] = 0x80;
crypt_len[index] = len;
// Clean the rest of this buffer as needed
i = len;
while (++i & 3)
saved_key[i>>6][GETPOS(i, ti)] = 0;
// Only re-zero up to the previous crypt's high-water mark
for (; i < clean_pos[ti]; i += 4)
*(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0;
clean_pos[ti] = len + 1;
if (len > longest)
longest = len;
((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3;
}
SIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], NULL, SSEi_MIXED_IN);
// Typically, no or very few crypts are done at this point so this is faster than to memcpy the lot
for (index = 0; index < NBKEYS; index++)
if (crypt_len[index] < 56)
crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
// Do another and possibly a third, fourth and fifth limb
for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
SIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD);
// Copy any output that is done now
for (index = 0; index < NBKEYS; index++)
if (((crypt_len[index] + 8) >> 6) == i)
crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti);
}
}
#undef t
#undef ti
#else
// Scalar path: straightforward two-stage SHA-1 per candidate
#ifdef _OPENMP
int index;
#pragma omp parallel for
for (index = 0; index < count; index++)
#else
#define index 0
#endif
{
unsigned int offsetMagicArray, lengthIntoMagicArray;
unsigned char temp_key[BINARY_SIZE];
unsigned char tempVar[UTF8_PLAINTEXT_LENGTH + MAGIC_ARRAY_SIZE + SALT_LENGTH]; //max size...
SHA_CTX ctx;
// Lazily compute key length and strip trailing spaces
if (keyLen[index] < 0) {
keyLen[index] = strlen((char*)saved_key[index]);
// Back-out of trailing spaces
while (saved_key[index][keyLen[index] - 1] == ' ') {
saved_key[index][--keyLen[index]] = 0;
if (keyLen[index] == 0) break;
}
}
//1. we need to SHA1 the password and username
memcpy(tempVar, saved_key[index], keyLen[index]);  //first: the password
memcpy(tempVar + keyLen[index], cur_salt->s, cur_salt->l); //second: the salt(username)
SHA1_Init(&ctx);
SHA1_Update(&ctx, tempVar, keyLen[index] + cur_salt->l);
SHA1_Final((unsigned char*)temp_key, &ctx);
lengthIntoMagicArray = extractLengthOfMagicArray(temp_key);
offsetMagicArray = extractOffsetToMagicArray(temp_key);
//2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode...
memcpy(tempVar + keyLen[index], &theMagicArray[offsetMagicArray], lengthIntoMagicArray);
memcpy(tempVar + keyLen[index] + lengthIntoMagicArray, cur_salt->s, cur_salt->l);
SHA1_Init(&ctx);
SHA1_Update(&ctx, tempVar, keyLen[index] + lengthIntoMagicArray + cur_salt->l);
SHA1_Final((unsigned char*)crypt_key[index], &ctx);
}
#undef index
#endif
return count;
}
/* Decode the 40 upper-case hex digits after the final '$' into the raw
 * 20-byte digest (endian-adjusted for the SIMD layout when needed). */
static void *get_binary(char *ciphertext)
{
	static int outbuf[BINARY_SIZE / sizeof(int)];
	unsigned char *out = (unsigned char*)outbuf;
	char *hex = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
			atoi16[ARCH_INDEX(hex[1])];
		hex += 2;
	}
#if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN
	alter_endianity(out, BINARY_SIZE);
#endif
	return (void*)out;
}
#if 0 // Not possible with current interface
// Reconstruct the canonical ciphertext "<salt>$<40 upper hex>" from a
// cracked entry (disabled: the fmt interface does not pass the salt here)
static char *source(struct db_password *pw, char Buf[LINE_BUFFER_SIZE] )
{
struct saltstruct *salt_s = (struct saltstruct*)(pw->source);
unsigned char realcipher[BINARY_SIZE];
unsigned char *cpi;
char *cpo;
int i;
memcpy(realcipher, pw->binary, BINARY_SIZE);
#ifdef SIMD_COEF_32
// Undo the endian swap applied by get_binary() for the SIMD layout
alter_endianity(realcipher, BINARY_SIZE);
#endif
memcpy(Buf, salt_s->s, salt_s->l);
cpo = &Buf[salt_s->l];
*cpo++ = '$';
cpi = realcipher;
// Emit upper-case hex, matching SAP's convention
for (i = 0; i < BINARY_SIZE; ++i) {
*cpo++ = itoa16u[(*cpi)>>4];
*cpo++ = itoa16u[*cpi&0xF];
++cpi;
}
*cpo = 0;
return Buf;
}
#endif
#define COMMON_GET_HASH_SIMD32 5
#define COMMON_GET_HASH_VAR crypt_key
#include "common-get-hash.h"
// Canonicalize one ciphertext: remove trailing salt padding (spaces)
// and trim the salt to at most SALT_LENGTH (44) bytes, keeping the
// '$' + digest tail intact.
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];
	char *p;
	int i;

	p = strrchr(ciphertext, '$');
	i = (int)(p - ciphertext) - 1;
	// The i >= 0 guard prevents the scan from reading before the
	// buffer for a (theoretically) all-space or empty salt field;
	// valid() should reject those, but split() must not rely on it.
	while (i >= 0 && (ciphertext[i] == ' ' || i >= SALT_LENGTH))
		i--;
	i++;
	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, i);
	strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i);
	return out;
}
// Public domain hash function by DJ Bernstein (xor variant: h = h*33 ^ c)
static int salt_hash(void *salt)
{
	struct saltstruct *ss = (struct saltstruct*)salt;
	unsigned int h = 5381;
	unsigned int i = 0;

	while (i < ss->l) {
		h = h * 33 ^ ss->s[i];
		i++;
	}
	return h & (SALT_HASH_SIZE - 1);
}
// Format descriptor wiring the functions above into the JtR core
struct fmt_main fmt_sapG = {
{ // params
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
// OpenMP is supported scalar or with multi-buffer (PARA) SIMD SHA-1
#if !defined(SIMD_COEF_32) || defined(SIMD_PARA_SHA1)
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_UTF8,
{ NULL },
{ NULL },
tests
}, { // methods
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
mp.c | #include <stdio.h>
// Token-pasting trick: Const(un,sign,ed) expands to the keyword
// 'unsigned', so every "float" below is really an unsigned int.
#define Const(x,y,z) x##y##z
#define float Const(un,sign,ed)
// Globals: x is the loop counter; s/a/e/d are 1-element arrays used
// for the index-swapping stunt in main(); d[0] is set to 25 there.
float x, s[1], a[1], e[1], d[1];
// A(a,b): addition via XOR + recursive carry propagation (a + b)
float A(float a, float b){return a&b?A(a^b,(a&b)<<1):a^b;}
// P(a,b): prints a in decimal, most significant digit first (b unused)
float P(float a, float b){return a?P(a/10,'-')+putchar(a%10+48)-48:0;}
// G(a,b): Euclid's gcd by repeated modulo (the xor-swap chain has
// unsequenced side effects -- technically UB, works on common
// compilers); returns 1 iff gcd(a,b) == 1, i.e. a and b are coprime
float G(float a, float b){for(;b;b^=a^=b^=a%=b);return !--a;}
// F(a,b): counts k in 1..b coprime to a; F(x,x) is Euler's totient phi(x)
float F(float a, float b){return b?G(a,b)+F(a,b-1):0;}
// S(a,b) with b=1: subtracts successive odd numbers 1,3,5,... (1+~b is
// -b, added with A), so it returns 1 iff a is a perfect square (or 0)
float S(float a, float b){return a?a<b?0:S(A(a,1+~b),b+2):1;}
// For x in 1..200000: if x is not divisible by its digit sum -> ":Ugly";
// else if phi(x) is a perfect square -> ":Good"; else -> ":Bad".
int main()
{
*d=25;
char str[10][10] = { ":Ugly", ":Good", ":Bad"};
int result[200005];
// x is the global 'unsigned' above; made private per thread here
#pragma omp parallel for private(x) schedule(dynamic, 8)
for( x=1; x<=200000; x++ )
{
int t, X = x, sum;
// Digit sum of x (always >= 1 for x >= 1, so x % y is safe)
for( sum=0; X; X/=10 ) sum += X % 10;
float y = sum;
if ( x % y ) t = 0;
else if ( S(F(x,x),1) ) t = 1;
else t = 2;
result[x] = t;
}
// Print sequentially, outside the parallel region
for( x=1; x<=200000; x++ )
{
printf("%d", x);
puts( str[result[x]] );
}
// Index-swap obfuscation (i[a] == a[i]): the chain collapses to d[0],
// which was set to 25 above, so this prints "Who's 25?"
printf("Who's %d?\n", (*s)[a][e][e][d]);
return 0;
}
|
GB_binop__bxnor_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int16)
// C=scalar+B GB (_bind1st__bxnor_int16)
// C=scalar+B' GB (_bind1st_tran__bxnor_int16)
// C=A+scalar GB (_bind2nd__bxnor_int16)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated wrapper: the included template expands with the GB_*
// macros above, so each entry computes cij = ~(aij ^ bij) (bitwise XNOR).
void GB (_Cdense_ewise3_noaccum__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// Return GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// letting the caller fall back to the generic implementation.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when compiled out (GB_DISABLE); otherwise applies
// cij = ~(cij ^ b) to every entry via the subassign template.
// (The duplicate trailing "return (GrB_SUCCESS) ;" after the inner block,
// present in other generated variants, was unreachable dead code and has
// been removed.)
GrB_Info GB (_Cdense_accumb__bxnor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): the code generator emits no colscale kernel for the
// BXNOR operator; kept for structural parity with other generated files.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no rowscale kernel is generated for BXNOR; see above.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
// For eWiseUnion, alpha/beta substitute for entries missing in A/B
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxnor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// GrB_NO_VALUE signals "kernel compiled out"; caller uses generic path
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for XNOR (commutative), so only the non-flipped
// branch below is compiled for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// GrB_NO_VALUE signals "kernel compiled out"; caller uses generic path
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// GrB_NO_VALUE signals "kernel compiled out"; caller uses generic path
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each present entry p: Cx [p] = ~(x ^ Bx [p]) (bitwise XNOR).
GrB_Info GB (_bind1st__bxnor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Bx = (int16_t *) Bx_input ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip entries not present in the bitmap Bb
if (GBB (Bb, k))
{
int16_t bval = GBX (Bx, k, false) ;
Cx [k] = ~(x ^ bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each present entry p: Cx [p] = ~(Ax [p] ^ y) (bitwise XNOR).
GrB_Info GB (_bind2nd__bxnor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Ax = (int16_t *) Ax_input ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap Ab
if (GBB (Ab, k))
{
int16_t aval = GBX (Ax, k, false) ;
Cx [k] = ~(aval ^ y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for code following this function (generated
// boilerplate; both definitions happen to be int16_t here)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GrB_NO_VALUE signals "kernel compiled out"; caller uses generic path
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
line_search_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY)
#define KRATOS_LINE_SEARCH_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/strategies/line_search_strategy.h"
#include "utilities/openmp_utils.h"
#include "utilities/variable_utils.h"
// Convergence criteria
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// Default builder and solver
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
// TODO: Extend the descriptions
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/** \brief Short class definition.
This class
*/
template<class TSparseSpace,
         class TDenseSpace, // = DenseSpace<double>,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class LineSearchContactStrategy :
    public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;

    /** Counted pointer of ClassName */
    KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy );

    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
    typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType;
    typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
    typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
    typedef typename BaseType::TDataType TDataType;
    typedef TSparseSpace SparseSpaceType;
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
    typedef ModelPart::NodesContainerType NodesArrayType;
    typedef ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef std::size_t IndexType;

    /**
     * Default constructor
     * @param rModelPart: The model part of the problem
     * @param pScheme: The integration scheme
     * @param pNewLinearSolver: The linear solver employed
     * @param pNewConvergenceCriteria: The convergence criteria employed
     * @param MaxIterations: The maximum number of iterations
     * @param CalculateReactions: The flag for the reaction calculation
     * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag: The flag that allows to move the mesh
     * @param ThisParameters: Extra settings (currently no defaults are defined)
     */
    LineSearchContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
    {
        KRATOS_TRY;
        mRecalculateFactor = false; // BUGFIX: member was never initialized
        Parameters DefaultParameters = Parameters(R"(
{
})" );
        ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
        KRATOS_CATCH("");
    }

    /**
     * Constructor with a custom builder and solver
     * @param rModelPart: The model part of the problem
     * @param pScheme: The integration scheme
     * @param pNewLinearSolver: The linear solver employed
     * @param pNewConvergenceCriteria: The convergence criteria employed
     * @param pNewBuilderAndSolver: The builder and solver employed
     * @param MaxIterations: The maximum number of iterations
     * @param CalculateReactions: The flag for the reaction calculation
     * @param ReformDofSetAtEachStep: The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag: The flag that allows to move the mesh
     * @param ThisParameters: Extra settings (currently no defaults are defined)
     */
    LineSearchContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag )
    {
        KRATOS_TRY;
        mRecalculateFactor = false; // BUGFIX: member was never initialized
        Parameters DefaultParameters = Parameters(R"(
{
})" );
        ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
        KRATOS_CATCH("");
    }

    /**
     * Destructor.
     */
    ~LineSearchContactStrategy() override
    = default;

    ///@}
    ///@name Access
    ///@{
    ///@}
    ///@name Inquiry
    ///@{
    ///@}
    ///@name Input and output
    ///@{
    ///@}
    ///@name Friends
    ///@{

protected:
    ///@name Protected static Member Variables
    ///@{
    ///@}
    ///@name Protected member Variables
    ///@{

    bool mRecalculateFactor; // To check if we recalculate or not the scale factor

    ///@}
    ///@name Protected Operators
    ///@{

    /**
     * Performs all the required operations that should be done (for each step)
     * before solving the solution step.
     * A member variable should be used as a flag to make sure this function is called only once per step.
     */
    void InitializeSolutionStep() override
    {
        BaseType::InitializeSolutionStep();
        // TODO: Add something if necessary
    }

    /**
     * Here the database is updated: a separate parabolic line search is
     * performed for the displacement DoFs and the Lagrange-multiplier (LM)
     * DoFs.  Residuals are evaluated at 0, 1/2 and 1 of the increment Dx and
     * the final update backs off from the full step to the parabola optimum.
     * @param A The LHS matrix
     * @param Dx The computed increment of DoFs
     * @param b The RHS (residual) vector, overwritten here
     * @param MoveMesh Whether the mesh nodes are moved with the update
     */
    void UpdateDatabase(
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b,
        const bool MoveMesh
        ) override
    {
        typename TSchemeType::Pointer pScheme = this->GetScheme();
        typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate in the parts of LM and displacement

        TSystemVectorType aux(b.size()); //TODO: do it by using the space
        TSparseSpace::Assign(aux, 0.5, Dx);

        TSystemVectorType DxDisp(b.size());
        TSystemVectorType DxLM(b.size());
        ComputeSplitDx(Dx, DxDisp, DxLM);

        // Compute residual without update
        TSparseSpace::SetToZero(b);
        pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
        double roDisp;
        double roLM;
        ComputeMixedResidual(b, roDisp, roLM);

        // Compute half step residual
        NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
        TSparseSpace::SetToZero(b);
        pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
        double rhDisp;
        double rhLM;
        ComputeMixedResidual(b, rhDisp, rhLM);

        // Compute full step residual (add another half Dx to the previous half)
        NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
        TSparseSpace::SetToZero(b);
        pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
        double rfDisp;
        double rfLM;
        ComputeMixedResidual(b, rfDisp, rfLM);

        // We compute the parabola
        double XminDisp = 1e-3;
        double XmaxDisp = 1.0;
        double XminLM = 1e-3;
        double XmaxLM = 1.0;
        // BUGFIX: ComputeParabola's signature is (Xmax, Xmin, ...) and the
        // optimum is written into the FIRST argument.  The original code
        // passed (Xmin..., Xmax...) swapped, so Xmax* always stayed at its
        // initial 1.0 and the line search below had no effect.
        ComputeParabola(XmaxDisp, XminDisp, rfDisp, roDisp, rhDisp);
        ComputeParabola(XmaxLM, XminLM, rfLM, roLM, rhLM);

        // Perform final update: back off (1 - Xopt) of each split increment
        TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp);
        TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM);
        NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
    }

    /**
     * This method splits the vector of increments of DoF in displacement and LM parts
     * @param Dx The increment of displacements and LM
     * @param DxDisp The increment of displacements (LM entries zeroed)
     * @param DxLM The increment of LM (displacement entries zeroed)
     */
    void ComputeSplitDx(
        TSystemVectorType& Dx,
        TSystemVectorType& DxDisp,
        TSystemVectorType& DxLM
        )
    {
        // Now we iterate over all the nodes
        NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
        const int num_nodes = static_cast<int>(nodes_array.size());

        #pragma omp parallel for
        for(int i = 0; i < num_nodes; ++i)
        {
            auto it_node = nodes_array.begin() + i;
            for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++)
            {
                const int j = (itDoF)->EquationId();
                std::size_t CurrVar = (itDoF)->GetVariable().Key();
                if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z))
                {
                    DxDisp[j] = Dx[j];
                    DxLM[j] = 0.0;
                }
                else // Corresponding with contact
                {
                    DxDisp[j] = 0.0;
                    DxLM[j] = Dx[j];
                }
            }
        }
    }

    /**
     * This method calculates one norm for the displacement residual and another for the LM residual
     * @param b The residual vector
     * @param normDisp The (2-)norm of the displacement residual (output)
     * @param normLM The (2-)norm of the LM residual (output)
     */
    void ComputeMixedResidual(
        TSystemVectorType& b,
        double& normDisp,
        double& normLM
        )
    {
        // BUGFIX: the accumulators were read (+=) without being initialized,
        // which is undefined behavior and produced garbage norms.
        normDisp = 0.0;
        normLM = 0.0;

        // Now we iterate over all the nodes
        NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
        const int num_nodes = static_cast<int>(nodes_array.size());

        #pragma omp parallel for
        for(int i = 0; i < num_nodes; ++i)
        {
            auto it_node = nodes_array.begin() + i;
            for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++)
            {
                const int j = (itDoF)->EquationId();
                std::size_t CurrVar = (itDoF)->GetVariable().Key();
                if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z))
                {
                    #pragma omp atomic
                    normDisp += b[j] * b[j];
                }
                else // Corresponding with contact
                {
                    #pragma omp atomic
                    normLM += b[j] * b[j];
                }
            }
        }

        normDisp = std::sqrt(normDisp);
        normLM = std::sqrt(normLM);
    }

    /**
     * This method computes the parabola necessary for the line search
     * @param Xmax Output: the abscissa of the minimum of the residual parabola (clamped)
     * @param Xmin Fallback abscissa used when the parabola has no interior minimum
     * @param rf The residual norm of the full step
     * @param ro The residual norm without step
     * @param rh The residual norm of the half step
     */
    void ComputeParabola(
        double& Xmax,
        double& Xmin,
        const double rf,
        const double ro,
        const double rh
        )
    {
        // Compute optimal (limited to the range 0-1)
        // Parabola is y = a*x^2 + b*x + c -> min/max for
        // x=0 --> r=ro
        // x=1/2 --> r=rh
        // x=1 --> r=rf
        // c= ro, b= 4*rh -rf -3*ro, a= 2*rf - 4*rh + 2*ro
        const double parabole_a = 2 * rf + 2 * ro - 4 * rh;
        const double parabole_b = 4 * rh - rf - 3 * ro;

        if( parabole_a > 0.0) // If parabola has a local minimum
        {
            Xmax = -0.5 * parabole_b/parabole_a; // -b / 2a
            // NOTE(review): the comment above says range 0-1, but the clamp
            // below allows [-1, 1] — confirm whether negative steps are intended.
            if( Xmax > 1.0)
                Xmax = 1.0;
            else if(Xmax < -1.0)
                Xmax = -1.0;
        }
        else // Parabola degenerates to either a line or to have a local max. best solution on either extreme
        {
            if(rf < ro)
                Xmax = 1.0;
            else
                Xmax = Xmin; // Should be zero, but otherwise it will stagnate
        }
    }

    ///@}
    ///@name Protected Operations
    ///@{
    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{

    /**
     * Copy constructor.
     * NOTE(review): intentionally shallow/empty as in the original; it does not
     * copy any state (mRecalculateFactor included) — confirm this is intended.
     */
    LineSearchContactStrategy(const LineSearchContactStrategy& Other)
    {
    };

private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{
    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{
    ///@}
    ///@name Private Access
    ///@{
    ///@}
    ///@name Serialization
    ///@{
    ///@name Private Inquiry
    ///@{
    ///@}
    ///@name Un accessible methods
    ///@{
    ///@}
}; /* Class LineSearchContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
|
lu2lib.c | //
// lu2lib.c
//
// J. Makino
// Time-stamp: <2019-05-06 22:08:30 makino>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <timerlib.h>
#ifndef NOBLAS
#ifdef MKL
#include <mkl_cblas.h>
#else
#include <cblas.h>
#endif
#endif
#ifdef USEGDR
#include "gdrdgemm.h"
#endif
#define FTYPE double
#include <emmintrin.h>
typedef double v2df __attribute__((vector_size(16)));
typedef union {v2df v; double s[2];}v2u;
#ifndef USEGDR
/* Stub used when GDR hardware support is compiled out (no USEGDR). */
void gdrsetboardid(int boardid)
{ (void)boardid; /* silence unused-parameter warnings */ }
#endif
/*
 * matmul2_host: reference dense GEMM, C = A * B, for square n x n
 * row-major matrices (VLA parameters).  FTYPE is double (see the #define
 * above); it is spelled out here.  Improvement over the original: each
 * element is accumulated in a local scalar and stored once, instead of
 * repeatedly reading/writing c[i][j] through the pointer in the inner loop.
 * The order of additions is unchanged, so results are bit-identical.
 */
void matmul2_host(int n,
                  double a[n][n],
                  double b[n][n],
                  double c[n][n])
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            double acc = 0.0e0;
            for (int k = 0; k < n; k++)
                acc += a[i][k] * b[k][j];
            c[i][j] = acc;
        }
    }
}
// simplest version
/*
 * Reference kernel: C -= A * B, where A is m x kk, B is kk x n and C is
 * m x n; n1/n2/n3 are the declared row lengths (leading dimensions) of the
 * three arrays.  Plain triple loop, no blocking or copying.
 */
void matmul_for_small_nk_0(int n1, double a[][n1],
                           int n2, double b[][n2],
                           int n3, double c[][n3],
                           int m,
                           int kk,
                           int n)
{
    for (int row = 0; row < m; row++) {
        for (int col = 0; col < n; col++) {
            /* subtract the dot product term by term, k ascending */
            for (int p = 0; p < kk; p++) {
                c[row][col] -= a[row][p] * b[p][col];
            }
        }
    }
}
// make copy of B
/*
 * C -= A * B, with B first transposed into a local buffer so the inner
 * dot-product walks both operands with unit stride.  Same contract as
 * matmul_for_small_nk_0.
 */
void matmul_for_small_nk_1(int n1, double a[][n1],
                           int n2, double b[][n2],
                           int n3, double c[][n3],
                           int m,
                           int kk,
                           int n)
{
    double bt[n][kk]; /* bt[col][p] = b[p][col] */

    for (int col = 0; col < n; col++) {
        for (int p = 0; p < kk; p++) {
            bt[col][p] = b[p][col];
        }
    }

    for (int row = 0; row < m; row++) {
        for (int col = 0; col < n; col++) {
            double dot = 0.0;
            for (int p = 0; p < kk; p++) {
                dot += a[row][p] * bt[col][p];
            }
            c[row][col] -= dot;
        }
    }
}
// hand-unroll innermost loop
// C -= A*B with B transposed into a local copy and the dot-product loop
// hand-unrolled by 8.  PRECONDITION: kk must be a multiple of 8 (the k loop
// strides by 8 with no remainder handling).
void matmul_for_small_nk_2(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
double bcopy[n][kk];
// bcopy[j][k] = b[k][j]: transposed copy for unit-stride access
for(j=0;j<n;j++)
for(k=0;k<kk;k++)
bcopy[j][k] = b[k][j];
for(i=0;i<m;i++){
double *ap=a[i];
for(j=0;j<n;j++){
double *bp = bcopy[j];
double tmp=0.0;
// 8-way unrolled dot product of row i of A with column j of B
for(k=0;k<kk;k+=8)
tmp += ap[k]*bp[k]
+ ap[k+1]*bp[k+1]
+ ap[k+2]*bp[k+2]
+ ap[k+3]*bp[k+3]
+ ap[k+4]*bp[k+4]
+ ap[k+5]*bp[k+5]
+ ap[k+6]*bp[k+6]
+ ap[k+7]*bp[k+7];
c[i][j]-=tmp;
}
}
}
// hand-unroll mid-loop
// C -= A*B: as nk_2 but with the column (j) loop additionally unrolled by 4,
// keeping four independent accumulators.  PRECONDITIONS: n must be a
// multiple of 4 and kk a multiple of 8 (no remainder handling).
void matmul_for_small_nk_3(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
double bcopy[n][kk];
// transposed copy of B for unit-stride dot products
for(j=0;j<n;j++)
for(k=0;k<kk;k++)
bcopy[j][k] = b[k][j];
for(i=0;i<m;i++){
double *ap=a[i];
for(j=0;j<n;j+=4){
// four consecutive transposed columns of B
double *bp = bcopy[j];
double *bpp = bcopy[j+1];
double *bp2 = bcopy[j+2];
double *bp3 = bcopy[j+3];
double tmp=0.0;
double tmp1=0.0;
double tmp2=0.0;
double tmp3=0.0;
for(k=0;k<kk;k+=8){
tmp += ap[k]*bp[k]
+ ap[k+1]*bp[k+1]
+ ap[k+2]*bp[k+2]
+ ap[k+3]*bp[k+3]
+ ap[k+4]*bp[k+4]
+ ap[k+5]*bp[k+5]
+ ap[k+6]*bp[k+6]
+ ap[k+7]*bp[k+7];
tmp1 += ap[k]*bpp[k]
+ ap[k+1]*bpp[k+1]
+ ap[k+2]*bpp[k+2]
+ ap[k+3]*bpp[k+3]
+ ap[k+4]*bpp[k+4]
+ ap[k+5]*bpp[k+5]
+ ap[k+6]*bpp[k+6]
+ ap[k+7]*bpp[k+7];
tmp2 += ap[k]*bp2[k]
+ ap[k+1]*bp2[k+1]
+ ap[k+2]*bp2[k+2]
+ ap[k+3]*bp2[k+3]
+ ap[k+4]*bp2[k+4]
+ ap[k+5]*bp2[k+5]
+ ap[k+6]*bp2[k+6]
+ ap[k+7]*bp2[k+7];
tmp3 += ap[k]*bp3[k]
+ ap[k+1]*bp3[k+1]
+ ap[k+2]*bp3[k+2]
+ ap[k+3]*bp3[k+3]
+ ap[k+4]*bp3[k+4]
+ ap[k+5]*bp3[k+5]
+ ap[k+6]*bp3[k+6]
+ ap[k+7]*bp3[k+7];
}
c[i][j]-=tmp;
c[i][j+1]-=tmp1;
c[i][j+2]-=tmp2;
c[i][j+3]-=tmp3;
}
}
}
// hand-unroll mid-loop by 2
// C -= A*B: as nk_3 but with the column (j) loop unrolled by 2 instead of 4.
// PRECONDITIONS: n must be even and kk a multiple of 8.
void matmul_for_small_nk_4(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
double bcopy[n][kk];
// transposed copy of B for unit-stride dot products
for(j=0;j<n;j++)
for(k=0;k<kk;k++)
bcopy[j][k] = b[k][j];
for(i=0;i<m;i++){
double *ap=a[i];
for(j=0;j<n;j+=2){
double *bp = bcopy[j];
double *bpp = bcopy[j+1];
double tmp=0.0;
double tmp1=0.0;
for(k=0;k<kk;k+=8){
tmp += ap[k]*bp[k]
+ ap[k+1]*bp[k+1]
+ ap[k+2]*bp[k+2]
+ ap[k+3]*bp[k+3]
+ ap[k+4]*bp[k+4]
+ ap[k+5]*bp[k+5]
+ ap[k+6]*bp[k+6]
+ ap[k+7]*bp[k+7];
tmp1 += ap[k]*bpp[k]
+ ap[k+1]*bpp[k+1]
+ ap[k+2]*bpp[k+2]
+ ap[k+3]*bpp[k+3]
+ ap[k+4]*bpp[k+4]
+ ap[k+5]*bpp[k+5]
+ ap[k+6]*bpp[k+6]
+ ap[k+7]*bpp[k+7];
}
c[i][j]-=tmp;
c[i][j+1]-=tmp1;
}
}
}
// use sse2 for dot product
// C -= A*B using SSE2 (gcc v2df vector extension): B is repacked as pairs of
// adjacent columns per vector, row i of A is broadcast element-wise into
// acopy, and C is updated two columns at a time.
// PRECONDITIONS: n and kk must be even.
// NOTE(review): the prefetch loops touch a[i+4] and c[i+4], which read past
// the last four rows near i == m — confirm the arrays are padded.
void matmul_for_small_nk_5(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
int nh = n/2;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
double acopyd[kk];
// bcopy2[j][k] = (b[k][2j], b[k][2j+1]) — column pairs as one vector
// (bcopy and acopyd above are unused leftovers)
for(j=0;j<nh;j++)
for(k=0;k<kk;k++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
double *ap=a[i];
double *acp = (double*) acopy;
register v2df tmp= (v2df){0.0,0.0};
v2df * cp = (v2df*) (&(c[i][0]));
for(k=0;k<kk;k+=4){
__builtin_prefetch((double*)a[i+4]+k,0);
}
for(j=0;j<n;j+=4){
__builtin_prefetch(c[i+4]+j,0);
}
// broadcast each scalar a[i][k] into both lanes of acopy[k]
for(k=0;k<kk;k+=2){
// v2df aa = *((v2df*)(ap+k));
// acopy[k]=__builtin_ia32_shufpd(aa,aa,0x0);
// acopy[k+1]= __builtin_ia32_shufpd(aa,aa,0x5);
acp[k*2]=acp[k*2+1]=ap[k];
acp[k*2+2]=acp[k*2+3]=ap[k+1];
}
for(j=0;j<nh;j++){
tmp = (v2df){0.0,0.0};
v2df * bp = bcopy2[j];
for(k=0;k<kk;k+=2){
tmp += acopy[k]*bp[k]
+acopy[k+1]*bp[k+1]
#if 0
+acopy[k+2]*bp[k+2]
+acopy[k+3]*bp[k+3]
+acopy[k+4]*bp[k+4]
+acopy[k+5]*bp[k+5]
+acopy[k+6]*bp[k+6]
+acopy[k+7]*bp[k+7]
#endif
;
}
cp[j] -= tmp;
}
}
}
// use sse2 for dot product
// C -= A*B with SSE2, processing FOUR rows of A/C per outer iteration.
// PRECONDITIONS: m a multiple of 4, n and kk even.
// NOTE(review): the first prefetch loop uses `j` before the j loop runs, so
// it prefetches c[...]+nh (j retains its value from the repack loop) — this
// looks like a latent bug; also the prefetches read rows up to a[i+7]/c[i+7],
// past the end for the last iterations.
void matmul_for_small_nk_6(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j,k;
int nh = n/2;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df acopy3[kk];
v2df acopy4[kk];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
double *acp3 = (double*) acopy3;
double *acp4 = (double*) acopy4;
// bcopy2[j][k] = (b[k][2j], b[k][2j+1]) — column pairs as one vector
for(j=0;j<nh;j++)
for(k=0;k<kk;k++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i+=4){
double *ap=a[i];
double *ap2=a[i+1];
double *ap3=a[i+2];
double *ap4=a[i+3];
register v2df tmp, tmp2, tmp3, tmp4;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * cp2 = (v2df*) (&(c[i+1][0]));
v2df * cp3 = (v2df*) (&(c[i+2][0]));
v2df * cp4 = (v2df*) (&(c[i+3][0]));
for(k=0;k<kk;k+=4){
__builtin_prefetch((double*)a[i+4]+k,0);
__builtin_prefetch((double*)a[i+5]+k,0);
__builtin_prefetch((double*)a[i+6]+k,0);
__builtin_prefetch((double*)a[i+7]+k,0);
__builtin_prefetch(c[i+4]+j,0);
__builtin_prefetch(c[i+5]+j,0);
__builtin_prefetch(c[i+6]+j,0);
__builtin_prefetch(c[i+7]+j,0);
}
// broadcast a[i..i+1][k] into both lanes via shufpd for the first two rows
for(k=0;k<kk;k+=2){
v2df * aa = (v2df*)(ap+k);
acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa = (v2df*)(ap2+k);
acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
// acp[k*2]=acp[k*2+1]=ap[k];
// acp[k*2+2]=acp[k*2+3]=ap[k+1];
// acp2[k*2]=acp2[k*2+1]=ap2[k];
// acp2[k*2+2]=acp2[k*2+3]=ap2[k+1];
}
// rows 3 and 4 broadcast with plain scalar stores
for(k=0;k<kk;k+=2){
acp3[k*2]=acp3[k*2+1]=ap3[k];
acp3[k*2+2]=acp3[k*2+3]=ap3[k+1];
acp4[k*2]=acp4[k*2+1]=ap4[k];
acp4[k*2+2]=acp4[k*2+3]=ap4[k+1];
}
for(j=0;j<nh;j++){
tmp = tmp2= tmp3= tmp4= (v2df){0.0,0.0};
v2df * bp = bcopy2[j];
#if 0
for(k=0;k<kk;k+=4){
tmp += acopy[k]*bp[k]
+acopy[k+1]*bp[k+1]
+acopy[k+2]*bp[k+2]
+acopy[k+3]*bp[k+3];
tmp2 += acopy2[k]*bp[k]
+acopy2[k+1]*bp[k+1]
+acopy2[k+2]*bp[k+2]
+acopy2[k+3]*bp[k+3];
tmp3 += acopy3[k]*bp[k]
+acopy3[k+1]*bp[k+1]
+acopy3[k+2]*bp[k+2]
+acopy3[k+3]*bp[k+3];
tmp4 += acopy4[k]*bp[k]
+acopy4[k+1]*bp[k+1]
+acopy4[k+2]*bp[k+2]
+acopy4[k+3]*bp[k+3];
}
#endif
for(k=0;k<kk;k+=2){
tmp += acopy[k]*bp[k]
+acopy[k+1]*bp[k+1];
tmp2 += acopy2[k]*bp[k]
+acopy2[k+1]*bp[k+1];
tmp3 += acopy3[k]*bp[k]
+acopy3[k+1]*bp[k+1];
tmp4 += acopy4[k]*bp[k]
+acopy4[k+1]*bp[k+1];
}
cp[j] -= tmp;
cp2[j] -= tmp2;
cp3[j] -= tmp3;
cp4[j] -= tmp4;
}
}
}
// C -= A*B with SSE2, processing TWO rows per outer iteration and unrolling
// the vector dot product by 8.  PRECONDITIONS: m even, n even, kk a multiple
// of 8.  The commented TSC code was instrumentation for per-phase cycle counts.
// NOTE(review): prefetches read rows a[i+4..5]/c[i+4..5] past the end near i == m.
void matmul_for_small_nk7(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int kk,
int n)
{
int i,j;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
// BEGIN_TSC;
// bcopy2[j][k] = (b[k][2j], b[k][2j+1]) — column pairs as one vector
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
// END_TSC(bpcount);
for(i=0;i<m;i+=2){
// BEGIN_TSC;
double *ap=a[i];
double *ap2=a[i+1];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * cp2 = (v2df*) (&(c[i+1][0]));
// broadcast a[i][k] / a[i+1][k] into both vector lanes
for(k=0;k<kk;k+=2){
v2df * aa = (v2df*)(ap+k);
__builtin_prefetch((double*)a[i+4]+k,0);
__builtin_prefetch((double*)a[i+5]+k,0);
acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa = (v2df*)(ap2+k);
acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
// acp[k*2]=acp[k*2+1]=ap[k];
// acp[k*2+2]=acp[k*2+3]=ap[k+1];
// acp2[k*2]=acp2[k*2+1]=ap2[k];
// acp2[k*2+2]=acp2[k*2+3]=ap2[k+1];
}
// END_TSC(apcount);
// BEGIN_TSC;
for(j=0;j<nh;j++){
tmp = tmp2= (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df ctmp2 = cp2[j] ;
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,0);
__builtin_prefetch(c[i+5]+j,0);
// 8-way unrolled vector dot products for both rows at once
for(k=0;k<kk;k+=8){
int k2 = k+4;
v2df *avp = acopy+k;
v2df *avp2 = acopy2+k;
v2df *bvp = bp+k;
tmp += avp[0]*bvp[0];
tmp2 += avp2[0]*bvp[0];
tmp +=avp[1]*bvp[1];
tmp2+=avp2[1]*bvp[1];
tmp +=avp[2]*bvp[2];
tmp2+=avp2[2]*bvp[2];
tmp +=avp[3]*bvp[3];
tmp2+=avp2[3]*bvp[3];
tmp += avp[4]*bvp[4];
tmp2 += avp2[4]*bvp[4];
tmp +=avp[5]*bvp[5];
tmp2+=avp2[5]*bvp[5];
tmp +=avp[6]*bvp[6];
tmp2+=avp2[6]*bvp[6];
tmp +=avp[7]*bvp[7];
tmp2+=avp2[7]*bvp[7];
}
#if 0
for(k=0;k<kk;k+=8){
int k2 = k+4;
tmp += acopy[k]*bp[k];
tmp2 += acopy2[k]*bp[k];
tmp +=acopy[k+1]*bp[k+1];
tmp2+=acopy2[k+1]*bp[k+1];
tmp +=acopy[k+2]*bp[k+2];
tmp2+=acopy2[k+2]*bp[k+2];
tmp +=acopy[k+3]*bp[k+3];
tmp2+=acopy2[k+3]*bp[k+3];
tmp += acopy[k2]*bp[k2];
tmp2 += acopy2[k2]*bp[k2];
tmp +=acopy[k2+1]*bp[k2+1];
tmp2+=acopy2[k2+1]*bp[k2+1];
tmp +=acopy[k2+2]*bp[k2+2];
tmp2+=acopy2[k2+2]*bp[k2+2];
tmp +=acopy[k2+3]*bp[k2+3];
tmp2+=acopy2[k2+3]*bp[k2+3];
}
#endif
#if 0
for(k=0;k<kk;k+=2){
tmp += acopy[k]*bp[k];
tmp __builtin_prefetch(c[i+4+(j&1)]+j,0);
+=acopy[k+1]*bp[k+1];
tmp2 += acopy2[k]*bp[k];
tmp2+=acopy2[k+1]*bp[k+1];
}
#endif
cp[j] = ctmp -tmp;
cp2[j] = ctmp2 -tmp2;
}
// END_TSC(dotcount);
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
// XMM registers
// Register name strings used by the raw-asm micro-kernels below.  The
// kernels schedule x86-64 SSE2 instructions by hand; each macro emits one
// instruction via a (implicitly volatile) asm statement with a memory
// operand, so statement order in the C source is the instruction order.
#define X0 "%xmm0"
#define X1 "%xmm1"
#define X2 "%xmm2"
#define X3 "%xmm3"
#define X4 "%xmm4"
#define X5 "%xmm5"
#define X6 "%xmm6"
#define X7 "%xmm7"
#define X8 "%xmm8"
#define X9 "%xmm9"
#define X10 "%xmm10"
#define X11 "%xmm11"
#define X12 "%xmm12"
#define X13 "%xmm13"
#define X14 "%xmm14"
#define X15 "%xmm15"
// aligned 16-byte load / store (MOVNTPD is the non-temporal store variant)
#define LOADPD(mem, reg) asm("movapd %0, %"reg::"m"(mem));
#define STORPD(reg, mem) asm("movapd %"reg " , %0"::"m"(mem));
#define MOVNTPD(reg, mem) asm("movntpd %"reg " , %0"::"m"(mem));
#define MOVAPD(src, dst) asm("movapd " src "," dst);
#define MOVQ(src, dst) asm("movq " src "," dst);
// BCAST0/BCAST1: duplicate the low/high lane into both lanes of reg
#define BCAST0(reg) asm("shufpd $0x00, " reg "," reg);
#define BCAST1(reg) asm("shufpd $0xff, " reg "," reg);
#define MULPD(src, dst) asm("mulpd " src "," dst);
#define ADDPD(src, dst) asm("addpd " src "," dst);
#define SUBPD(src, dst) asm("subpd " src "," dst);
// C -= A*B for the fixed inner dimension kk == 8, written with the raw SSE2
// asm macros above.  B is repacked into vectors of adjacent column pairs;
// each row of C is updated eight scalar columns (four vectors) at a time,
// with X12..X15 serving as the running accumulators.  The four identical
// k-step groups of the original are expressed as a loop; the emitted
// instruction sequence is unchanged (asm statements are volatile and keep
// their source order).
void matmul_for_nk8_0(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
#define PREFETCHL 32
/* warm the cache for the first rows of A and C */
for(i=0;i<PREFETCHL;i++){
__builtin_prefetch((double*)a[i],0,0);
__builtin_prefetch(c[i+8],1,0);
}
/* repack B: bcopy2[j][k] = (b[k][2j], b[k][2j+1]) */
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
v2df *ap = (v2df*) a[i];
v2df *cp = (v2df*) (&(c[i][0]));
__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
for(j=0;j<nh;j+=4){
__builtin_prefetch(c[i+PREFETCHL]+j,1,3);
v2df *bvp0 = bcopy2[j];
v2df *bvp1 = bcopy2[j+1];
v2df *bvp2 = bcopy2[j+2];
v2df *bvp3 = bcopy2[j+3];
/* X12..X15 accumulate c[i][2j .. 2j+7] */
LOADPD(cp[j],X12);
LOADPD(cp[j+1],X13);
LOADPD(cp[j+2],X14);
LOADPD(cp[j+3],X15);
int g;
for(g=0;g<4;g++){
/* ap[g] holds a[i][2g] and a[i][2g+1]; broadcast into X0/X1 */
LOADPD(ap[g],X0);
LOADPD(bvp0[2*g],X4);
LOADPD(bvp1[2*g],X5);
LOADPD(bvp2[2*g],X6);
LOADPD(bvp3[2*g],X7);
LOADPD(bvp0[2*g+1],X8);
LOADPD(bvp1[2*g+1],X9);
LOADPD(bvp2[2*g+1],X10);
LOADPD(bvp3[2*g+1],X11);
MOVAPD(X0,X1);
BCAST0(X0);
BCAST1(X1);
MULPD(X0,X4);
MULPD(X0,X5);
MULPD(X0,X6);
MULPD(X0,X7);
MULPD(X1,X8);
MULPD(X1,X9);
MULPD(X1,X10);
MULPD(X1,X11);
SUBPD(X4,X12);
SUBPD(X5,X13);
SUBPD(X6,X14);
SUBPD(X7,X15);
SUBPD(X8,X12);
SUBPD(X9,X13);
SUBPD(X10,X14);
SUBPD(X11,X15);
}
STORPD(X12,cp[j+0]);
STORPD(X13,cp[j+1]);
STORPD(X14,cp[j+2]);
STORPD(X15,cp[j+3]);
}
}
}
// C -= A*B for fixed kk == 16 (raw SSE2 asm), processing TWO rows of A/C per
// outer iteration and two C vectors (four columns) per inner iteration.
// X12/X13 accumulate row i, X14/X15 row i+1.
// PRECONDITIONS (implied by the loop strides): m even, n a multiple of 4.
void matmul_for_nk16_0a(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 16;
int nh = n/2;
register int k;
v2df bcopy2[nh][kk];
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 32
// warm the cache for the first rows of A and C
for(i=0;i<PREFETCHL;i++){
__builtin_prefetch((double*)a[i],0,0);
__builtin_prefetch((double*)a[i]+8,0,0);
__builtin_prefetch(c[i+8],1,0);
__builtin_prefetch(c[i+8]+8,1,0);
}
// repack B: bcopy2[j][k] = (b[k][2j], b[k][2j+1])
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i+=2){
// BEGIN_TSC;
v2df *ap = (v2df*) a[i];
v2df * cp = (v2df*) (&(c[i][0]));
v2df *app = (v2df*) a[i+1];
v2df * cpp = (v2df*) (&(c[i+1][0]));
__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
__builtin_prefetch((double*)a[i+PREFETCHL+1],0,0);
__builtin_prefetch((double*)a[i+PREFETCHL+1]+8,0,0);
int k;
for(j=0;j<nh;j+=2){
__builtin_prefetch(c[i+PREFETCHL]+j,1,0);
v2df * bvp0 = bcopy2[j];
v2df * bvp1 = bcopy2[j+1];
LOADPD(cp[j],X12);
LOADPD(cp[j+1],X13);
LOADPD(cpp[j],X14);
LOADPD(cpp[j+1],X15);
// each k handles a[i][2k],a[i][2k+1] (X0/X1) and a[i+1][..] (X2/X3)
for(k=0;k<8;k++){
LOADPD(ap[k],X0);
LOADPD(app[k],X2);
MOVAPD(X0,X1);
BCAST0(X0);
BCAST1(X1);
MOVAPD(X2,X3);
BCAST0(X2);
BCAST1(X3);
LOADPD(bvp0[k*2],X4);
MOVAPD(X4,X6);
MULPD(X0,X4);
SUBPD(X4,X12);
LOADPD(bvp1[k*2],X5);
MOVAPD(X5,X7);
MULPD(X0,X5);
SUBPD(X5,X13);
LOADPD(bvp0[k*2+1],X8);
MOVAPD(X8,X10);
MULPD(X1,X8);
SUBPD(X8,X12);
LOADPD(bvp1[k*2+1],X9);
MOVAPD(X9,X11);
MULPD(X1,X9);
SUBPD(X9,X13);
MULPD(X2,X6);
SUBPD(X6,X14);
MULPD(X2,X7);
SUBPD(X7,X15);
MULPD(X3,X10);
SUBPD(X10,X14);
MULPD(X3,X11);
SUBPD(X11,X15);
}
STORPD(X12,cp[j+0]);
STORPD(X13,cp[j+1]);
STORPD(X14,cpp[j+0]);
STORPD(X15,cpp[j+1]);
}
}
}
// C -= A*B for fixed kk == 16 (raw SSE2 asm), one row of A/C per outer
// iteration, four C vectors (eight columns) per inner iteration; the k-steps
// are a loop of 8 (each step consumes two scalars of A).
// PRECONDITION (implied by the j stride): n a multiple of 8.
void matmul_for_nk16_0c(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 16;
int nh = n/2;
register int k;
v2df bcopy2[nh][kk];
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 16
// warm the cache for the first rows of A and C
for(i=0;i<PREFETCHL;i++){
__builtin_prefetch((double*)a[i],0,0);
__builtin_prefetch((double*)a[i]+8,0,0);
__builtin_prefetch(c[i+8],1,0);
__builtin_prefetch(c[i+8]+8,1,0);
}
// repack B: bcopy2[j][k] = (b[k][2j], b[k][2j+1])
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
v2df *ap = (v2df*) a[i];
v2df * cp = (v2df*) (&(c[i][0]));
__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
int k;
for(j=0;j<nh;j+=4){
__builtin_prefetch(c[i+PREFETCHL]+j,1,0);
v2df * bvp0 = bcopy2[j];
v2df * bvp1 = bcopy2[j+1];
v2df * bvp2 = bcopy2[j+2];
v2df * bvp3 = bcopy2[j+3];
// X12..X15 accumulate c[i][2j .. 2j+7]
LOADPD(cp[j],X12);
LOADPD(cp[j+1],X13);
LOADPD(cp[j+2],X14);
LOADPD(cp[j+3],X15);
for(k=0;k<8;k++){
LOADPD(ap[k],X0);
LOADPD(bvp0[k*2],X4);
LOADPD(bvp1[k*2],X5);
LOADPD(bvp2[k*2],X6);
LOADPD(bvp3[k*2],X7);
MOVAPD(X0,X1);
BCAST0(X0);
BCAST1(X1);
LOADPD(bvp0[k*2+1],X8);
LOADPD(bvp1[k*2+1],X9);
LOADPD(bvp2[k*2+1],X10);
LOADPD(bvp3[k*2+1],X11);
MULPD(X0,X4);
MULPD(X0,X5);
MULPD(X0,X6);
MULPD(X0,X7);
MULPD(X1,X8);
MULPD(X1,X9);
MULPD(X1,X10);
MULPD(X1,X11);
SUBPD(X4,X12);
SUBPD(X5,X13);
SUBPD(X6,X14);
SUBPD(X7,X15);
SUBPD(X8,X12);
SUBPD(X9,X13);
SUBPD(X10,X14);
SUBPD(X11,X15);
}
STORPD(X12,cp[j+0]);
STORPD(X13,cp[j+1]);
STORPD(X14,cp[j+2]);
STORPD(X15,cp[j+3]);
}
}
}
// C -= A*B for fixed kk == 32 (raw SSE2 asm); same structure as
// matmul_for_nk16_0c but with 16 k-steps per inner block.
// PRECONDITION (implied by the j stride): n a multiple of 8.
void matmul_for_nk32_0(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 32;
int nh = n/2;
register int k;
v2df bcopy2[nh][kk];
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 8
// warm the cache for the first rows of A and C
for(i=0;i<PREFETCHL;i++){
__builtin_prefetch((double*)a[i],0,0);
__builtin_prefetch((double*)a[i]+8,0,0);
__builtin_prefetch((double*)a[i]+16,0,0);
__builtin_prefetch((double*)a[i]+24,0,0);
__builtin_prefetch(c[i+8],1,0);
__builtin_prefetch(c[i+8]+8,1,0);
__builtin_prefetch(c[i+8]+16,1,0);
__builtin_prefetch(c[i+8]+24,1,0);
}
// repack B: bcopy2[j][k] = (b[k][2j], b[k][2j+1])
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
v2df *ap = (v2df*) a[i];
v2df * cp = (v2df*) (&(c[i][0]));
__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
__builtin_prefetch((double*)a[i+PREFETCHL]+16,0,0);
__builtin_prefetch((double*)a[i+PREFETCHL]+24,0,0);
int k;
for(j=0;j<nh;j+=4){
__builtin_prefetch(c[i+PREFETCHL]+j,1,0);
v2df * bvp0 = bcopy2[j];
v2df * bvp1 = bcopy2[j+1];
v2df * bvp2 = bcopy2[j+2];
v2df * bvp3 = bcopy2[j+3];
// X12..X15 accumulate c[i][2j .. 2j+7]
LOADPD(cp[j],X12);
LOADPD(cp[j+1],X13);
LOADPD(cp[j+2],X14);
LOADPD(cp[j+3],X15);
for(k=0;k<16;k++){
LOADPD(ap[k],X0);
LOADPD(bvp0[k*2],X4);
LOADPD(bvp1[k*2],X5);
LOADPD(bvp2[k*2],X6);
LOADPD(bvp3[k*2],X7);
MOVAPD(X0,X1);
BCAST0(X0);
BCAST1(X1);
MULPD(X0,X4);
MULPD(X0,X5);
MULPD(X0,X6);
MULPD(X0,X7);
LOADPD(bvp0[k*2+1],X8);
LOADPD(bvp1[k*2+1],X9);
LOADPD(bvp2[k*2+1],X10);
LOADPD(bvp3[k*2+1],X11);
MULPD(X1,X8);
MULPD(X1,X9);
MULPD(X1,X10);
MULPD(X1,X11);
SUBPD(X4,X12);
SUBPD(X5,X13);
SUBPD(X6,X14);
SUBPD(X7,X15);
SUBPD(X8,X12);
SUBPD(X9,X13);
SUBPD(X10,X14);
SUBPD(X11,X15);
}
STORPD(X12,cp[j+0]);
STORPD(X13,cp[j+1]);
STORPD(X14,cp[j+2]);
STORPD(X15,cp[j+3]);
}
}
}
// C -= A*B for fixed kk == 16 (raw SSE2 asm), one row per outer iteration,
// four C vectors per inner iteration.  This variant differs from
// matmul_for_nk16_0c only in the ordering inside each k-step (all eight B
// loads precede the broadcast and multiplies).  The eight identical k-step
// groups of the original are expressed as a loop; the emitted instruction
// sequence is unchanged (asm statements are volatile and keep source order).
void matmul_for_nk16_0b(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 16;
int nh = n/2;
register int k;
v2df bcopy2[nh][kk];
#ifdef PREFETCHL
#undef PREFETCHL
#endif
#define PREFETCHL 16
/* warm the cache for the first rows of A and C */
for(i=0;i<PREFETCHL;i++){
__builtin_prefetch((double*)a[i],0,0);
__builtin_prefetch((double*)a[i]+8,0,0);
__builtin_prefetch(c[i+8],1,0);
__builtin_prefetch(c[i+8]+8,1,0);
}
/* repack B: bcopy2[j][k] = (b[k][2j], b[k][2j+1]) */
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
v2df *ap = (v2df*) a[i];
v2df *cp = (v2df*) (&(c[i][0]));
__builtin_prefetch((double*)a[i+PREFETCHL],0,0);
__builtin_prefetch((double*)a[i+PREFETCHL]+8,0,0);
for(j=0;j<nh;j+=4){
__builtin_prefetch(c[i+PREFETCHL]+j,1,0);
v2df *bvp0 = bcopy2[j];
v2df *bvp1 = bcopy2[j+1];
v2df *bvp2 = bcopy2[j+2];
v2df *bvp3 = bcopy2[j+3];
/* X12..X15 accumulate c[i][2j .. 2j+7] */
LOADPD(cp[j],X12);
LOADPD(cp[j+1],X13);
LOADPD(cp[j+2],X14);
LOADPD(cp[j+3],X15);
int g;
for(g=0;g<8;g++){
/* ap[g] holds a[i][2g], a[i][2g+1]; broadcast into X0/X1 */
LOADPD(ap[g],X0);
LOADPD(bvp0[2*g],X4);
LOADPD(bvp1[2*g],X5);
LOADPD(bvp2[2*g],X6);
LOADPD(bvp3[2*g],X7);
LOADPD(bvp0[2*g+1],X8);
LOADPD(bvp1[2*g+1],X9);
LOADPD(bvp2[2*g+1],X10);
LOADPD(bvp3[2*g+1],X11);
MOVAPD(X0,X1);
BCAST0(X0);
BCAST1(X1);
MULPD(X0,X4);
MULPD(X0,X5);
MULPD(X0,X6);
MULPD(X0,X7);
MULPD(X1,X8);
MULPD(X1,X9);
MULPD(X1,X10);
MULPD(X1,X11);
SUBPD(X4,X12);
SUBPD(X5,X13);
SUBPD(X6,X14);
SUBPD(X7,X15);
SUBPD(X8,X12);
SUBPD(X9,X13);
SUBPD(X10,X14);
SUBPD(X11,X15);
}
STORPD(X12,cp[j+0]);
STORPD(X13,cp[j+1]);
STORPD(X14,cp[j+2]);
STORPD(X15,cp[j+3]);
}
}
}
void matmul_for_nk8_0d(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
double *ap=a[i];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
v2df acopy0=(v2df){a[i][0], a[i][0]};
v2df acopy1=(v2df){a[i][1], a[i][1]};
v2df acopy2=(v2df){a[i][2], a[i][2]};
v2df acopy3=(v2df){a[i][3], a[i][3]};
v2df acopy4=(v2df){a[i][4], a[i][4]};
v2df acopy5=(v2df){a[i][5], a[i][5]};
v2df acopy6=(v2df){a[i][6], a[i][6]};
v2df acopy7=(v2df){a[i][7], a[i][7]};
v2df zero=(v2df){0.0, 0.0};
LOADPD(acopy0,X0);
LOADPD(acopy1,X1);
LOADPD(acopy2,X2);
LOADPD(acopy3,X3);
LOADPD(acopy4,X4);
LOADPD(acopy5,X5);
LOADPD(acopy6,X6);
LOADPD(acopy7,X7);
for(j=0;j<nh;j++){
__builtin_prefetch(c[i+8]+j,1,0);
v2df * bvp = bcopy2[j];
LOADPD(cp[j],X14);
LOADPD(bvp[0],X8);
LOADPD(bvp[1],X9);
MULPD(X0,X8);
MULPD(X1,X9);
LOADPD(bvp[2],X10);
LOADPD(bvp[3],X11);
ADDPD(X9,X8);
MULPD(X2,X10);
MULPD(X3,X11);
ADDPD(X11,X10);
LOADPD(bvp[4],X9);
LOADPD(bvp[5],X11);
LOADPD(bvp[6],X12);
LOADPD(bvp[7],X13);
MULPD(X4,X9);
MULPD(X5,X11);
ADDPD(X10,X8);
ADDPD(X11,X9);
MULPD(X6,X12);
MULPD(X7,X13);
ADDPD(X13,X12);
ADDPD(X9,X8);
ADDPD(X12,X8);
SUBPD(X8,X14);
STORPD(X14,cp[j]);
}
}
}
void matmul_for_nk8_0c(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
double *ap=a[i];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
register v2df acopy0=(v2df){a[i][0], a[i][0]};
register v2df acopy1=(v2df){a[i][1], a[i][1]};
register v2df acopy2=(v2df){a[i][2], a[i][2]};
register v2df acopy3=(v2df){a[i][3], a[i][3]};
register v2df acopy4=(v2df){a[i][4], a[i][4]};
register v2df acopy5=(v2df){a[i][5], a[i][5]};
register v2df acopy6=(v2df){a[i][6], a[i][6]};
register v2df acopy7=(v2df){a[i][7], a[i][7]};
for(j=0;j<nh;j++){
tmp = (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,1,0);
v2df *bvp = bp;
tmp += acopy0*bvp[0];
tmp +=acopy1*bvp[1];
tmp +=acopy2*bvp[2];
tmp +=acopy3*bvp[3];
tmp +=acopy4*bvp[4];
tmp +=acopy5*bvp[5];
tmp +=acopy6*bvp[6];
tmp +=acopy7*bvp[7];
cp[j] = ctmp -tmp;
}
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
void matmul_for_nk8_0b(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i++){
// BEGIN_TSC;
double *ap=a[i];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
acopy[0]=(v2df){a[i][0], a[i][0]};
acopy[1]=(v2df){a[i][1], a[i][1]};
acopy[2]=(v2df){a[i][2], a[i][2]};
acopy[3]=(v2df){a[i][3], a[i][3]};
acopy[4]=(v2df){a[i][4], a[i][4]};
acopy[5]=(v2df){a[i][5], a[i][5]};
acopy[6]=(v2df){a[i][6], a[i][6]};
acopy[7]=(v2df){a[i][7], a[i][7]};
for(j=0;j<nh;j++){
tmp = tmp2= (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,1,0);
v2df *avp = acopy;
v2df *bvp = bp;
tmp += avp[0]*bvp[0];
tmp +=avp[1]*bvp[1];
tmp +=avp[2]*bvp[2];
tmp +=avp[3]*bvp[3];
tmp += avp[4]*bvp[4];
tmp +=avp[5]*bvp[5];
tmp +=avp[6]*bvp[6];
tmp +=avp[7]*bvp[7];
cp[j] = ctmp -tmp;
}
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
void matmul_for_nk8_0a(int n1, double a[][n1],
int n2, double b[][n2],
int n3, double c[][n3],
int m,
int n)
{
int i,j;
int kk = 8;
int nh = n/2;
register int k;
double bcopy[n][kk];
v2df bcopy2[nh][kk];
v2df acopy[kk];
v2df acopy2[kk];
v2df awork[4];
v2df awork2[4];
double *acp = (double*) acopy;
double *acp2 = (double*) acopy2;
unsigned long bpcount, apcount, dotcount;
bpcount= apcount= dotcount=0;
for(k=0;k<kk;k++)
for(j=0;j<nh;j++)
bcopy2[j][k] = *((v2df*)(b[k]+j+j));
for(i=0;i<m;i+=2){
// BEGIN_TSC;
double *ap=a[i];
double *ap2=a[i+1];
register v2df tmp, tmp2;
v2df * cp = (v2df*) (&(c[i][0]));
v2df * cp2 = (v2df*) (&(c[i+1][0]));
v2df * aa = (v2df*)(ap);
__builtin_prefetch((double*)a[i+8],0,0);
__builtin_prefetch((double*)a[i+9],0,0);
acopy[0]=(v2df){a[i][0], a[i][0]};
acopy[1]=(v2df){a[i][1], a[i][1]};
acopy[2]=(v2df){a[i][2], a[i][2]};
acopy[3]=(v2df){a[i][3], a[i][3]};
acopy[4]=(v2df){a[i][4], a[i][4]};
acopy[5]=(v2df){a[i][5], a[i][5]};
acopy[6]=(v2df){a[i][6], a[i][6]};
acopy[7]=(v2df){a[i][7], a[i][7]};
aa = (v2df*)(ap2);
acopy2[0]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
acopy2[2]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[3]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
acopy2[4]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[5]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
acopy2[6]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
acopy2[7]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
aa++;
for(j=0;j<nh;j++){
tmp = tmp2= (v2df){0.0,0.0};
v2df ctmp= cp[j];
v2df ctmp2 = cp2[j] ;
v2df * bp = bcopy2[j];
__builtin_prefetch(c[i+4]+j,1,0);
__builtin_prefetch(c[i+5]+j,1,0);
v2df *avp = acopy;
v2df *avp2 = acopy2;
v2df *bvp = bp;
tmp += avp[0]*bvp[0];
tmp2 += avp2[0]*bvp[0];
tmp +=avp[1]*bvp[1];
tmp2+=avp2[1]*bvp[1];
tmp +=avp[2]*bvp[2];
tmp2+=avp2[2]*bvp[2];
tmp +=avp[3]*bvp[3];
tmp2+=avp2[3]*bvp[3];
tmp += avp[4]*bvp[4];
tmp2 += avp2[4]*bvp[4];
tmp +=avp[5]*bvp[5];
tmp2+=avp2[5]*bvp[5];
tmp +=avp[6]*bvp[6];
tmp2+=avp2[6]*bvp[6];
tmp +=avp[7]*bvp[7];
tmp2+=avp2[7]*bvp[7];
cp[j] = ctmp -tmp;
cp2[j] = ctmp2 -tmp2;
}
}
// printf("m, kk, n = %d %d %d counts = %g %g %g\n", m,kk,n,
// (double)bpcount, (double)apcount, (double)dotcount);
}
/* C -= A*B for kk == 8 using pairwise-interleaved copies of B columns:
 * bcopy[j][k] = {b[2k][j], b[2k+1][j]}, so each v2df multiply pairs two
 * consecutive k terms and a horizontal add finishes the dot product.
 * Processes the A/C rows in tiles of kk; assumes m is a multiple of kk
 * and n even -- TODO confirm callers.
 * Fixed: 'v2df * awp = acopy+k' assigned a v2df(*)[kh] (pointer to a
 * whole row) to a v2df* -- a constraint violation in C.  acopy[k]
 * yields the intended row pointer.  Unused locals (nh, counters)
 * removed. */
void matmul_for_nk8_1(int n1, double a[][n1],
		      int n2, double b[][n2],
		      int n3, double c[][n3],
		      int m,
		      int n)
{
  int i,j,k;
  const int kk = 8;
  const int kh = kk/2;     /* A row length in v2df units */
  v2df bcopy[n][kh];
  v2df acopy[kk][kh];      /* kk x kk A tile, copied row-wise */
  for(j=0;j<n;j++)
    for(k=0;k<kh;k++)
      bcopy[j][k] = (v2df){b[k*2][j],b[k*2+1][j]};
  for(i=0;i<m;i+=kk){
    for(k=0;k<kk;k++){
      v2df *ak = (v2df*)(a[i+k]);
      v2df *awp = acopy[k];   /* row pointer (was the ill-typed acopy+k) */
      awp[0]=ak[0];
      awp[1]=ak[1];
      awp[2]=ak[2];
      awp[3]=ak[3];
    }
    for(k=0;k<kk;k++){
      v2u tmp, tmp1;         /* union view for the horizontal sums */
      v2df * ap = acopy[k];
      for(j=0;j<n;j+=2){
        tmp.v = ap[0]*bcopy[j][0]
          + ap[1]*bcopy[j][1]
          + ap[2]*bcopy[j][2]
          + ap[3]*bcopy[j][3];
        tmp1.v = ap[0]*bcopy[j+1][0]
          + ap[1]*bcopy[j+1][1]
          + ap[2]*bcopy[j+1][2]
          + ap[3]*bcopy[j+1][3];
        c[k+i][j] -= tmp.s[0]+tmp.s[1];
        c[k+i][j+1] -= tmp1.s[0]+tmp1.s[1];
      }
    }
  }
}
/* C -= A*B for kk == 8, staging both the A tile and the C tile in local
 * buffers and writing C back with non-temporal stores (movntpd) to
 * avoid polluting the cache.  ccopy rows hold kh = 4 v2df = 8 doubles,
 * so this kernel is only correct for n == 8 tiles -- TODO confirm
 * callers.
 * Fixed: the A-broadcast loop ran j < n, writing acopy2[j] (and reading
 * acopy+j) past the kk rows whenever n > kk; only rows 0..kk-1 are ever
 * consumed, so the bound is kk.  Dead #if 0 block and unused counters
 * removed. */
void matmul_for_nk8_2(int n1, double a[][n1],
		      int n2, double b[][n2],
		      int n3, double c[][n3],
		      int m,
		      int n)
{
  int i,j,k;
  const int kk = 8;
  const int kh = kk/2;
  int nh = n/2;
  v2df bcopy[nh][kk];     /* bcopy[j][k] = {b[k][2j], b[k][2j+1]} */
  v2df acopy[kk][kh];     /* raw copy of the kk x kk A tile */
  v2df ccopy[kk][kh];     /* staging buffer for the C tile */
  v2df acopy2[kk][kk];    /* broadcast (lane-duplicated) A tile */
  for(k=0;k<kk;k++)
    for(j=0;j<nh;j++)
      bcopy[j][k] = *((v2df*)(b[k]+j+j));
  for(i=0;i<m;i+=kk){
    for(k=0;k<kk;k++){
      __builtin_prefetch(a+i+k+8,0,0);
      __builtin_prefetch(c+i+k+8,1,0);
      v2df *ak = (v2df*)(a[i+k]);
      v2df * awp = (v2df*)(acopy+k);
      v2df *ck = (v2df*)(c[i+k]);
      v2df * cwp = (v2df*)(ccopy+k);
      awp[0]=ak[0];
      awp[1]=ak[1];
      awp[2]=ak[2];
      awp[3]=ak[3];
      cwp[0]=ck[0];
      cwp[1]=ck[1];
      cwp[2]=ck[2];
      cwp[3]=ck[3];
    }
    /* broadcast each element of the staged A tile into both lanes */
    for (j=0;j<kk;j++){
      double * ap = (double*)( acopy+j);
      for (k=0;k<kk;k++){
        acopy2[j][k]=(v2df){ap[k],ap[k]};
      }
    }
    for(k=0;k<kk;k++){
      v2df * cp = (v2df*) ccopy[k];
      v2df * ap = acopy2[k];
      for(j=0;j<nh;j++){
        v2df * bp = bcopy[j];
        cp[j] -= ap[0]*bp[0]
          + ap[1]*bp[1]
          + ap[2]*bp[2]
          + ap[3]*bp[3]
          + ap[4]*bp[4]
          + ap[5]*bp[5]
          + ap[6]*bp[6]
          + ap[7]*bp[7];
      }
    }
    /* write the staged C tile back with streaming stores */
    for(k=0;k<kk;k++){
      v2df *ck = (v2df*)(c[i+k]);
      v2df * cwp = (v2df*)(ccopy+k);
      __builtin_ia32_movntpd((double*)(ck),cwp[0]);
      __builtin_ia32_movntpd((double*)(ck+1),cwp[1]);
      __builtin_ia32_movntpd((double*)(ck+2),cwp[2]);
      __builtin_ia32_movntpd((double*)(ck+3),cwp[3]);
    }
  }
}
/* C -= A*B for kk == 8, OpenMP-parallel over blocks of 8 rows; each
 * thread keeps its own broadcast copy of the current A tile (acopy2 is
 * private).  Assumes m is a multiple of kk and n even -- TODO confirm.
 * Fixed: the A-broadcast loop ran j < n, which read A rows past the
 * kk-row tile and wrote past the end of acopy2[kk][kk] whenever n > kk;
 * only rows 0..kk-1 are consumed below, so the bound is kk.
 * Unused kh removed. */
void matmul_for_nk8(int n1, double a[][n1],
		    int n2, double b[][n2],
		    int n3, double c[][n3],
		    int m,
		    int n)
{
  int i,j,k;
  const int kk = 8;
  int nh = n/2;
  v2df bcopy[nh][kk];     /* bcopy[j][k] = {b[k][2j], b[k][2j+1]} */
  v2df acopy2[kk][kk];    /* per-thread broadcast copy of the A tile */
  BEGIN_TSC;
  for(k=0;k<kk;k++)
    for(j=0;j<nh;j++)
      bcopy[j][k] = *((v2df*)(b[k]+j+j));
#pragma omp parallel for private(i,j,k,acopy2) schedule(static)
  for(i=0;i<m;i+=kk){
    for(k=0;k<kk;k++){
      __builtin_prefetch(a+i+k+16,0,0);
      __builtin_prefetch(c+i+k+16,1,0);
    }
    /* broadcast each element of the kk x kk A tile into both lanes */
    for (j=0;j<kk;j++){
      double * ap = (double*)( a[i+j]);
      for (k=0;k<kk;k++){
        acopy2[j][k]=(v2df){ap[k],ap[k]};
      }
    }
    for(k=0;k<kk;k++){
      v2df * cp = (v2df*) (c[i+k]);
      v2df * ap = acopy2[k];
      for(j=0;j<nh;j++){
        v2df * bp = bcopy[j];
        cp[j] -= ap[0]*bp[0] + ap[1]*bp[1]
          + ap[2]*bp[2] + ap[3]*bp[3]
          + ap[4]*bp[4] + ap[5]*bp[5]
          + ap[6]*bp[6] + ap[7]*bp[7];
      }
    }
  }
  END_TSC(t,10);
}
/* Parallel driver for the kk == 8 kernel: splits the m rows into four
 * contiguous chunks (each a multiple of 8 rows) and runs
 * matmul_for_nk8_0 on each chunk from its own OpenMP thread. */
void matmul_for_nk8_3(int n1, double a[][n1],
		      int n2, double b[][n2],
		      int n3, double c[][n3],
		      int m,
		      int n)
{
  int chunk = ((m+31)/32)*8;   /* rows per thread, rounded to 8 */
  int t;
#pragma omp parallel for private(t) schedule(static)
  for(t=0;t<4;t++){
    int lo = t*chunk;
    int hi = lo + chunk;
    if (hi > m) hi = m;
    if (lo < m){
      matmul_for_nk8_0(n1, a[lo], n2, b, n3, c[lo], hi-lo, n);
    }
  }
}
/* kk == 16 kernel built from two kk == 8 passes: the left 8 columns of
 * A times the top 8 rows of B, then the right 8 columns of A times the
 * bottom 8 rows of B, in row strips of at most 64. */
void matmul_for_nk16_0(int n1, double a[][n1],
		       int n2, double b[][n2],
		       int n3, double c[][n3],
		       int m,
		       int n)
{
  int row0;
  int step = 64;
  for(row0 = 0; row0 < m; row0 += step){
    if (row0 + step > m) step = m - row0;   /* shrink the last strip */
    matmul_for_nk8_0(n1, (double(*)[]) (a[row0]), n2, b,
		     n3, (double(*)[]) (c[row0]), step, 16);
    matmul_for_nk8_0(n1, (double(*)[]) (&a[row0][8]), n2, (double(*)[])(b[8]),
		     n3, (double(*)[]) (c[row0]), step, 16);
  }
}
/* Parallel driver for the kk == 16 kernel: serial for small m,
 * otherwise four OpenMP threads each take a contiguous chunk of rows
 * (a multiple of 16). */
void matmul_for_nk16(int n1, double a[][n1],
		     int n2, double b[][n2],
		     int n3, double c[][n3],
		     int m,
		     int n)
{
  if (m < 64){
    /* too few rows to be worth splitting across threads */
    matmul_for_nk16_0c(n1, a, n2, b, n3, c, m, n);
    return;
  }
  int chunk = ((m+63)/64)*16;   /* rows per thread, rounded to 16 */
  int t;
#pragma omp parallel for private(t) schedule(static)
  for(t=0;t<4;t++){
    int lo = t*chunk;
    int hi = lo + chunk;
    if (hi > m) hi = m;
    if (lo < m){
      matmul_for_nk16_0c(n1, a[lo], n2, b, n3, c[lo], hi-lo, n);
    }
  }
}
/* Parallel driver for the kk == 32 kernel: four OpenMP threads each
 * take a contiguous chunk of rows (a multiple of 32). */
void matmul_for_nk32(int n1, double a[][n1],
		     int n2, double b[][n2],
		     int n3, double c[][n3],
		     int m,
		     int n)
{
  int chunk = ((m+127)/128)*32;   /* rows per thread, rounded to 32 */
  int t;
#pragma omp parallel for private(t) schedule(static)
  for(t=0;t<4;t++){
    int lo = t*chunk;
    int hi = lo + chunk;
    if (hi > m) hi = m;
    if (lo < m){
      matmul_for_nk32_0(n1, a[lo], n2, b, n3, c[lo], hi-lo, n);
    }
  }
}
/* C -= A*B for a general small inner dimension kk: two rows of C per
 * outer iteration, A rows broadcast with shufpd, inner dot product
 * unrolled by 8.  Delegates kk == 8 to the tuned matmul_for_nk8.
 * Assumes m even, kk a multiple of 8 (k loop steps by 8) and n even --
 * TODO confirm callers guarantee these.
 * Unused scratch (bcopy VLA, acp pointers, counters, k2) removed. */
void matmul_for_small_nk_7(int n1, double a[][n1],
			   int n2, double b[][n2],
			   int n3, double c[][n3],
			   int m,
			   int kk,
			   int n)
{
  int i,j;
  int nh = n/2;
  register int k;
  /* bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
  v2df bcopy2[nh][kk];
  v2df acopy[kk];       /* broadcasts of row i   */
  v2df acopy2[kk];      /* broadcasts of row i+1 */
  if (kk == 8){
    matmul_for_nk8(n1, a, n2, b, n3, c, m, n);
    return;
  }
  BEGIN_TSC;
  for(k=0;k<kk;k++)
    for(j=0;j<nh;j++)
      bcopy2[j][k] = *((v2df*)(b[k]+j+j));
  for(i=0;i<m;i+=2){
    double *ap=a[i];
    double *ap2=a[i+1];
    register v2df tmp, tmp2;
    v2df * cp = (v2df*) (&(c[i][0]));
    v2df * cp2 = (v2df*) (&(c[i+1][0]));
    for(k=0;k<kk;k+=2){
      v2df * aa = (v2df*)(ap+k);
      __builtin_prefetch((double*)a[i+4]+k,0);
      __builtin_prefetch((double*)a[i+5]+k,0);
      /* duplicate low/high lane of each pair via shufpd */
      acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
      acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
      aa = (v2df*)(ap2+k);
      acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
      acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
    }
    for(j=0;j<nh;j++){
      tmp = tmp2= (v2df){0.0,0.0};
      v2df ctmp= cp[j];
      v2df ctmp2 = cp2[j] ;
      v2df * bp = bcopy2[j];
      __builtin_prefetch(c[i+4]+j,0);
      __builtin_prefetch(c[i+5]+j,0);
      for(k=0;k<kk;k+=8){
        v2df *avp = acopy+k;
        v2df *avp2 = acopy2+k;
        v2df *bvp = bp+k;
        tmp += avp[0]*bvp[0];
        tmp2 += avp2[0]*bvp[0];
        tmp +=avp[1]*bvp[1];
        tmp2+=avp2[1]*bvp[1];
        tmp +=avp[2]*bvp[2];
        tmp2+=avp2[2]*bvp[2];
        tmp +=avp[3]*bvp[3];
        tmp2+=avp2[3]*bvp[3];
        tmp += avp[4]*bvp[4];
        tmp2 += avp2[4]*bvp[4];
        tmp +=avp[5]*bvp[5];
        tmp2+=avp2[5]*bvp[5];
        tmp +=avp[6]*bvp[6];
        tmp2+=avp2[6]*bvp[6];
        tmp +=avp[7]*bvp[7];
        tmp2+=avp2[7]*bvp[7];
      }
      cp[j] = ctmp -tmp;
      cp2[j] = ctmp2 -tmp2;
    }
  }
  END_TSC(t,11);
}
/* C -= A*B for a general small inner dimension kk, OpenMP-parallel over
 * row pairs; kk == 8/16/32 are delegated to the tuned kernels.
 * Assumes m even, kk a multiple of 8 and n even -- TODO confirm callers.
 * Unused scratch (bcopy VLA, acp pointers, ii, k2) removed. */
void matmul_for_small_nk(int n1, double a[][n1],
			 int n2, double b[][n2],
			 int n3, double c[][n3],
			 int m,
			 int kk,
			 int n)
{
  int i;
  int nh = n/2;
  register int k;
  /* bcopy2[j][k] = {b[k][2j], b[k][2j+1]} */
  v2df bcopy2[nh][kk];
  v2df acopy[kk];       /* per-thread broadcasts of row i   */
  v2df acopy2[kk];      /* per-thread broadcasts of row i+1 */
  if (kk == 8){
    matmul_for_nk8_3(n1, a, n2, b, n3, c, m, n);
    return;
  }
  if (kk == 16){
    matmul_for_nk16(n1, a, n2, b, n3, c, m, n);
    return;
  }
  if (kk == 32){
    matmul_for_nk32(n1, a, n2, b, n3, c, m, n);
    return;
  }
  BEGIN_TSC;
  for(k=0;k<kk;k++){
    int j;
    for(j=0;j<nh;j++)
      bcopy2[j][k] = *((v2df*)(b[k]+j+j));
  }
#pragma omp parallel for private(i,k,acopy,acopy2) schedule(static)
  for(i=0;i<m;i+=2){
    int j;
    double *ap=a[i];
    double *ap2=a[i+1];
    register v2df tmp, tmp2;
    v2df * cp = (v2df*) (&(c[i][0]));
    v2df * cp2 = (v2df*) (&(c[i+1][0]));
    for(k=0;k<kk;k+=2){
      v2df * aa = (v2df*)(ap+k);
      acopy [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
      acopy[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
      aa = (v2df*)(ap2+k);
      acopy2 [k]= (v2df) __builtin_ia32_shufpd(*aa,*aa,0x0);
      acopy2[k+1]=(v2df) __builtin_ia32_shufpd(*aa,*aa,0xff);
    }
    /* prefetch near and far row pairs of A and C */
    __builtin_prefetch(a[i+4],0,3);
    __builtin_prefetch(c[i+4],1);
    __builtin_prefetch(a[i+5],0,3);
    __builtin_prefetch(c[i+5],1);
    __builtin_prefetch(a[i+20],0,3);
    __builtin_prefetch(c[i+20],1);
    __builtin_prefetch(a[i+21],0,3);
    __builtin_prefetch(c[i+21],1);
    for(j=0;j<nh;j++){
      tmp = tmp2= (v2df){0.0,0.0};
      v2df ctmp= cp[j];
      v2df ctmp2 = cp2[j] ;
      v2df * bp = bcopy2[j];
      for(k=0;k<kk;k+=8){
        v2df *avp = acopy+k;
        v2df *avp2 = acopy2+k;
        v2df *bvp = bp+k;
        tmp += avp[0]*bvp[0];
        tmp2 += avp2[0]*bvp[0];
        tmp +=avp[1]*bvp[1];
        tmp2+=avp2[1]*bvp[1];
        tmp +=avp[2]*bvp[2];
        tmp2+=avp2[2]*bvp[2];
        tmp +=avp[3]*bvp[3];
        tmp2+=avp2[3]*bvp[3];
        tmp += avp[4]*bvp[4];
        tmp2 += avp2[4]*bvp[4];
        tmp +=avp[5]*bvp[5];
        tmp2+=avp2[5]*bvp[5];
        tmp +=avp[6]*bvp[6];
        tmp2+=avp2[6]*bvp[6];
        tmp +=avp[7]*bvp[7];
        tmp2+=avp2[7]*bvp[7];
      }
      cp[j] = ctmp -tmp;
      cp2[j] = ctmp2 -tmp2;
    }
  }
  END_TSC(t,11);
}
/* Row-major DGEMM front end: C = alpha*A*B + beta*C (A m x k, lda na;
 * B k x n, ldb nb; C m x n, ldc nc).  Dispatches between the GRAPE-DR
 * back end, the hand-tuned small-k kernels and CBLAS, and records
 * per-k-size timing via the BEGIN/END_TIMER and TSC macros. */
void mydgemm(int m,
	     int n,
	     int k,
	     double alpha,
	     double * a,
	     int na,
	     double * b,
	     int nb,
	     double beta,
	     double * c,
	     int nc)
{
  double t0, t1, t2;  /* NOTE(review): t2 is never used */
  // printf("mydgemm called %d %d %d\n", m, n, k);
  if (k>= 512){
    get_cputime(&t0,&t1);  /* start wall-clock measurement for large k */
  }
  BEGIN_TSC;
  BEGIN_TIMER(timer);
#ifdef USEGDR
  /* large problems go to the GDR accelerator */
  if ((k>512) || ((k==512) && ((n>=1024)||(m>=1024)))){
    // if (k>=2048){
    mygdrdgemm(m, n, k, alpha, a, na, b, nb, beta, c, nc);
  }else{
    /* the tuned kernels implement only C -= A*B (alpha=-1, beta=1) */
    if ((k<=16) && (alpha == -1.0) && (beta == 1.0)){
      /* NOTE(review): matmul_for_small_nk's parameters are
         (..., m, kk, n) but the arguments here are (m, n, k) --
         looks like n and k are swapped; confirm against the
         declaration before touching. */
      matmul_for_small_nk(na, a, nb, b, nc, c, m, n, k);
    }else{
      cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
		   m,n, k, alpha, a, na, b, nb, beta, c, nc);
    }
  }
#else
#ifdef GEMMTEST
  mytestdgemm(m, n, k, na, nb, nc,alpha, a, b, beta, c);
#else
  cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
	       m,n, k, alpha, a, na, b, nb, beta, c, nc);
#endif
#endif
  // cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
  //	       m,n, k, alpha, a, na, b, nb, beta, c, nc);
  /* attribute the elapsed time to a per-k-size timer slot */
  if (k==2048){
    END_TIMER(timer,31,((double)(m))*n*k*2);
    END_TSC(t,14);
  }else if (k==1024){
    END_TIMER(timer,32,((double)(m))*n*k*2);
    END_TSC(t,15);
  }else if (k==512){
    END_TIMER(timer,33,((double)(m))*n*k*2);
    END_TSC(t,17);
  }else{
    END_TIMER(timer,34,((double)(m))*n*k*2);
    END_TSC(t,18);
  }
  if (k>= 512){
    get_cputime(&t0,&t1);  /* t0 now holds elapsed time (presumably; see get_cputime) */
    dprintf(10,"dgemm M=%d N=%d K=%d time=%10.4g %g Gflops\n",
	    m,n,k,t0, ((double)m)*n*k*2/t0/1e9);
  }
}
/* Recovery path for the GRAPE-DR board: restart it via
 * gdr_check_and_restart, then push zero-filled operands through both
 * the raw CBLAS path and mydgemm to re-prime the pipeline.  The result
 * written to 'a' is all zeros (0 * 0 products), so only the side
 * effects matter.  No-op unless compiled with USEGDR. */
void reset_gdr(int m, double a[][m], int nb, double awork[][nb], int n)
{
#ifdef USEGDR
  double aw2[nb][nb];  /* nb x nb scratch VLA -- large stack use for big nb */
  if (nb < 2048){
    fprintf(stderr,"reset_gdr nb = %d <2048 not supported\n", nb);
    exit(-1);
  }
  gdr_check_and_restart(a, awork, aw2);
  int i,j;
  dprintf(9,"reset_gdr clear awork\n");
  for (i=0;i<nb;i++){
    for (j=0;j<n;j++){
      awork[j][i]=0;
    }
  }
  dprintf(9,"reset_gdr clear aw2\n");
  for (i=0;i<nb;i++){
    for (j=0;j<nb;j++){
      aw2[j][i]=0;
    }
  }
  dprintf(9,"reset_gdr try_dgemm\n");
  gdrsetforceswapab();
  /* run the zero multiply through both entry points */
  cblas_dgemm( CblasRowMajor, CblasNoTrans, CblasNoTrans,
	       n,nb, nb, 1.0, awork, nb, aw2, nb, 0.0, a, m);
  mydgemm(n,nb,nb,1.0,awork,nb,aw2,nb,0.0,a,m);
#endif
}
#ifndef USEGDR
/* No-op stubs so callers link cleanly when the GRAPE-DR back end is
 * not compiled in. */
void gdrsetforceswapab(){}
void gdrresetforceswapab(){}
void gdrsetskipsendjmat(){};
void gdrresetskipsendjmat(){}
void gdrsetnboards(){}
void set_matmul_msg_level(int level){}
void gdrdgemm_set_stress_factor(int x){}
#endif
|
ROF_TV_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ROF_TV_core.h"
#define EPS 1.0e-8
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/*sign function*/
/* Sign of x: 1 for positive, -1 for negative, 0 for zero. */
int sign(float x) {
    if (x > 0) return 1;
    if (x < 0) return -1;
    return 0;
}
/* C-OMP implementation of ROF-TV denoising/regularization model [1] (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume [REQUIRED]
* 2. lambda - regularisation parameter (a constant or the same size as the input (1))
* 3. tau - marching step for explicit scheme, ~1 is recommended [REQUIRED]
* 4. Number of iterations, for explicit scheme >= 150 is recommended [REQUIRED]
* 5. eplsilon: tolerance constant
*
* Output:
* [1] Regularised image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms"
*/
/* Running iterations of TV-ROF function */
/* Explicit-scheme ROF total-variation denoising (2D when dimZ == 1,
 * otherwise 3D).  Copies Input to Output, then runs up to
 * iterationsNumb update steps; when epsil != 0 the relative change
 * ||u_k - u_{k-5}|| / ||u_k|| is checked every 5th iteration and the
 * loop stops after it falls below epsil more than 3 times.
 * lambdaPar is a scalar when lambda_is_arr == 0, else a per-voxel map.
 * On return infovector[0] = iterations performed, infovector[1] =
 * last measured tolerance.  Returns 0.
 * Fixed: DimTotal was computed as (long)(dimX*dimY*dimZ) -- the
 * multiplication overflowed in int for large volumes; now widened
 * first.  calloc results are now checked. */
float TV_ROF_CPU_main(float *Input, float *Output, float *infovector, float *lambdaPar, int lambda_is_arr, int iterationsNumb, float tau, float epsil, int dimX, int dimY, int dimZ)
{
    float *D1=NULL, *D2=NULL, *D3=NULL, *Output_prev=NULL;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    int i = 0;
    long DimTotal,j;
    /* widen before multiplying to avoid int overflow */
    DimTotal = (long)dimX*(long)dimY*(long)dimZ;

    D1 = calloc(DimTotal, sizeof(float));
    D2 = calloc(DimTotal, sizeof(float));
    D3 = calloc(DimTotal, sizeof(float));
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));
    if ((D1 == NULL) || (D2 == NULL) || (D3 == NULL) ||
        ((epsil != 0.0f) && (Output_prev == NULL))) {
        /* allocation failed: release what succeeded and report 0 iterations */
        free(D1); free(D2); free(D3); free(Output_prev);
        infovector[0] = 0.0f;
        infovector[1] = 0.0f;
        return 0;
    }

    /* copy into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    /* start TV iterations */
    for(i=0; i < iterationsNumb; i++) {
        if ((epsil != 0.0f) && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* calculate differences */
        D1_func(Output, D1, (long)(dimX), (long)(dimY), (long)(dimZ));
        D2_func(Output, D2, (long)(dimX), (long)(dimY), (long)(dimZ));
        if (dimZ > 1) D3_func(Output, D3, (long)(dimX), (long)(dimY), (long)(dimZ));
        TV_kernel(D1, D2, D3, Output, Input, lambdaPar, lambda_is_arr, tau, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* check early stopping criteria */
        if ((epsil != 0.0f) && (i % 5 == 0)) {
            re = 0.0f; re1 = 0.0f;
            for(j=0; j<DimTotal; j++)
            {
                re += powf(Output[j] - Output_prev[j],2);
                re1 += powf(Output[j],2);
            }
            re = sqrtf(re)/sqrtf(re1);
            if (re < epsil) count++;
            if (count > 3) break;
        }
    }
    free(D1);free(D2); free(D3);
    if (epsil != 0.0f) free(Output_prev);

    /* adding info into info_vector */
    infovector[0] = (float)(i); /* iterations number (if stopped earlier based on tolerance) */
    infovector[1] = re;         /* reached tolerance */
    return 0;
}
/* calculate differences 1 */
/* First TV difference term: D1[idx] = (forward x-difference) / T1 where
 * T1 is the regularised gradient magnitude built from forward/backward
 * differences with minmod limiting (0.5*(sign(d+)+sign(d-))*min|d|).
 * Neumann (symmetric) boundary conditions; 3D branch when dimZ > 1.
 * A is the current image; D1 is written in full.
 * Fixed: the 3D branch called double-precision sqrt() while D2_func and
 * D3_func use sqrtf() -- now consistent (avoids the float->double->float
 * round trip). */
float D1_func(float *A, float *D1, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1;
    long i,j,k,i1,i2,k1,j1,j2,k2,index;

    if (dimZ > 1) {
#pragma omp parallel for shared (A, D1, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1,NOMy_1,NOMy_0,NOMz_1,NOMz_0,denom1,denom2,denom3,T1)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                for(k=0; k<dimZ; k++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neuman) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;

                    /* Forward-backward differences */
                    NOMx_1 = A[(dimX*dimY)*k + j1*dimX + i] - A[index]; /* x+ */
                    NOMy_1 = A[(dimX*dimY)*k + j*dimX + i1] - A[index]; /* y+ */
                    NOMy_0 = A[index] - A[(dimX*dimY)*k + j*dimX + i2]; /* y- */
                    NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                    NOMz_0 = A[index] - A[(dimX*dimY)*k2 + j*dimX + i]; /* z- */

                    denom1 = NOMx_1*NOMx_1;
                    /* minmod limiter on the transverse differences */
                    denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                    denom2 = denom2*denom2;
                    denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabs(NOMz_1),fabs(NOMz_0)));
                    denom3 = denom3*denom3;
                    T1 = sqrtf(denom1 + denom2 + denom3 + EPS); /* was sqrt(): consistency with D2/D3 */
                    D1[index] = NOMx_1/T1;
                }}}
    }
    else {
#pragma omp parallel for shared (A, D1, dimX, dimY) private(i, j, i1, j1, i2, j2,NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1,index)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;

                /* Forward-backward differences */
                NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
                NOMy_0 = A[index] - A[(j)*dimX + i2]; /* y- */

                denom1 = NOMx_1*NOMx_1;
                denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                denom2 = denom2*denom2;
                T1 = sqrtf(denom1 + denom2 + EPS);
                D1[index] = NOMx_1/T1;
            }}
    }
    return *D1;
}
/* calculate differences 2 */
/* Second TV difference term: D2[idx] = (forward y-difference) / T2,
 * where T2 is the regularised gradient magnitude combining the forward
 * y-difference with minmod-limited x (and z, in 3D) differences.
 * Neumann (symmetric) boundary conditions; the 3D branch is taken when
 * dimZ > 1.  A is the current image; D2 is written in full.
 * Returns D2[0] (the return value is not used by the caller). */
float D2_func(float *A, float *D2, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2;
    long i,j,k,i1,i2,k1,j1,j2,k2,index;

    if (dimZ > 1) {
#pragma omp parallel for shared (A, D2, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                for(k=0; k<dimZ; k++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neuman) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;

                    /* Forward-backward differences */
                    NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
                    NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
                    NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
                    NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                    NOMz_0 = A[index] - A[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */

                    denom1 = NOMy_1*NOMy_1;
                    /* minmod limiter on the transverse differences */
                    denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
                    denom2 = denom2*denom2;
                    denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabs(NOMz_1),fabs(NOMz_0)));
                    denom3 = denom3*denom3;
                    T2 = sqrtf(denom1 + denom2 + denom3 + EPS);
                    D2[index] = NOMy_1/T2;
                }}}
    }
    else {
#pragma omp parallel for shared (A, D2, dimX, dimY) private(i, j, i1, j1, i2, j2, NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2,index)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;

                /* Forward-backward differences */
                NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
                NOMx_0 = A[index] - A[j2*dimX + i]; /* x- */

                denom1 = NOMy_1*NOMy_1;
                denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
                denom2 = denom2*denom2;
                T2 = sqrtf(denom1 + denom2 + EPS);
                D2[index] = NOMy_1/T2;
            }}
    }
    return *D2;
}
/* calculate differences 3 */
/* Third TV difference term (3D only): D3[idx] = (forward z-difference)
 * / T3, where T3 combines the forward z-difference with minmod-limited
 * x and y differences.  Neumann (symmetric) boundary conditions.
 * A is the current volume; D3 is written in full.
 * Returns D3[0] (the return value is not used by the caller). */
float D3_func(float *A, float *D3, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3;
    long index,i,j,k,i1,i2,k1,j1,j2,k2;

#pragma omp parallel for shared (A, D3, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMy_0, NOMx_0, NOMz_1, denom1, denom2, denom3, T3)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;
                k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                k2 = k - 1; if (k2 < 0) k2 = k+1;

                /* Forward-backward differences */
                NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
                NOMy_0 = A[index] - A[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */
                NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
                NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */

                denom1 = NOMz_1*NOMz_1;
                /* minmod limiter on the transverse differences */
                denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
                denom2 = denom2*denom2;
                denom3 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                denom3 = denom3*denom3;
                T3 = sqrtf(denom1 + denom2 + denom3 + EPS);
                D3[index] = NOMz_1/T3;
            }}}
    return *D3;
}
/* calculate divergence */
/* Explicit ROF update step: B += tau*(lambda*div(D) - (B - A)), where
 * div(D) is assembled from backward differences of the D1/D2 (and D3 in
 * 3D) fields and A is the original noisy data.  lambda is a scalar when
 * lambda_is_arr == 0 (the index scaling index*lambda_is_arr then always
 * reads lambda[0]), otherwise a per-voxel map.  Neumann boundaries.
 * Returns B[0] (the return value is not used by the caller). */
float TV_kernel(float *D1, float *D2, float *D3, float *B, float *A, float *lambda, int lambda_is_arr, float tau, long dimX, long dimY, long dimZ)
{
    float dv1, dv2, dv3, lambda_val;
    long index,i,j,k,i1,i2,k1,j1,j2,k2;

    if (dimZ > 1) {
#pragma omp parallel for shared (D1, D2, D3, B, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, dv1,dv2,dv3,lambda_val)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                for(k=0; k<dimZ; k++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* scalar lambda reads lambda[0]; array lambda reads lambda[index] */
                    lambda_val = *(lambda + index* lambda_is_arr);
                    /* symmetric boundary conditions (Neuman) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;

                    /* divergence components (backward differences of D) */
                    dv1 = D1[index] - D1[(dimX*dimY)*k + j2*dimX+i];
                    dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2];
                    dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i];

                    B[index] += tau*(lambda_val*(dv1 + dv2 + dv3) - (B[index] - A[index]));
                }}}
    }
    else {
#pragma omp parallel for shared (D1, D2, B, dimX, dimY) private(index, i, j, i1, j1, i2, j2,dv1,dv2,lambda_val)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                lambda_val = *(lambda + index* lambda_is_arr);
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;

                /* divergence components */
                dv1 = D1[index] - D1[j2*dimX + i];
                dv2 = D2[index] - D2[j*dimX + i2];

                B[index] += tau*(lambda_val*(dv1 + dv2) - (B[index] - A[index]));
            }}
    }
    return *B;
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
/*! \brief Operator parameters for Reshape.
 *  'shape' is the current interface (supports the special values
 *  0/-1/-2/-3/-4 interpreted by InferReshapeShape); 'target_shape' and
 *  'keep_highest' are the deprecated legacy interface. */
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
  mxnet::TShape target_shape;   // deprecated: explicit output shape
  bool keep_highest;            // deprecated: keep the leading dim as-is
  mxnet::Tuple<int> shape;      // requested shape (may contain special values)
  bool reverse;                 // interpret special values right-to-left
  DMLC_DECLARE_PARAMETER(ReshapeParam) {
    DMLC_DECLARE_FIELD(shape)
    .set_default(mxnet::Tuple<int>())
    .describe("The target shape");
    DMLC_DECLARE_FIELD(reverse)
    .set_default(false)
    .describe("If true then the special values are inferred from right to left");
    DMLC_DECLARE_FIELD(target_shape)
    .set_default(mxnet::TShape(0, -1))
    .describe("(Deprecated! Use ``shape`` instead.) "
              "Target new shape. One and only one dim can be 0, "
              "in which case it will be inferred from the rest of dims");
    DMLC_DECLARE_FIELD(keep_highest).set_default(false)
    .describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
              "If set to true, then the first dim in target_shape is ignored,"
              "and always fixed as input");
  }
  // Equality over all four fields; used for parameter caching/hashing.
  bool operator==(const ReshapeParam &other) const {
    return this->target_shape == other.target_shape &&
           this->keep_highest == other.keep_highest &&
           this->shape == other.shape &&
           this->reverse == other.reverse;
  }
};
// Expand a reshape spec `shape` against the input shape `dshape`.
// Special values handled per spec entry:
//   0  -> keep the corresponding input dim
//   -1 -> infer this dim from the remaining size (at most one allowed)
//   -2 -> copy all remaining input dims
//   -3 -> merge (multiply) the next two input dims into one
//   -4 -> split one input dim into the next two spec entries (one may be -1)
// When `reverse` is set, both shapes are processed right-to-left.
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape, bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;  // accumulated output dims
size_t src_idx = 0;      // cursor into the input dims
int inf_idx = -1;        // position of the single -1 (inferred) output dim
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer: placeholder 1 is written now, replaced after the loop
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len-1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
// unknown * anything is still unknown
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0) d1 = d0 / d2;  // d0 must be known to do this
if (d2 == -1 && d0 >= 0) d2 = d0 / d1;  // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
// Resolve the -1 placeholder: remaining_size = total / product(other dims).
if (inf_idx >= 0) {
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp) new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
// Undo the right-to-left processing before building the result.
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
/*!
 * \brief Back-propagate a fully known output shape into an input shape that
 *        has at most one unknown dimension.
 * \param in  input shape, filled in place when exactly one dim is unknown
 * \param out output shape
 * \return true if `in` is (now) fully known, false if inference is impossible
 */
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
  if (shape_is_known(*in) && shape_is_known(out)) {
    return true;
  } else if (!shape_is_known(out)) {
    return false;
  } else {
    int zero_axis = -1;
    int known_dim_size_prod = 1;
    for (int i = 0; i < in->ndim(); i++) {
      if (!mxnet::dim_size_is_known(*in, i)) {
        if (zero_axis != -1)
          return false;  // more than 1 unknown dim found.
        else
          zero_axis = i;
      } else {
        known_dim_size_prod *= (*in)[i];
      }
    }
    // Guard: if ndim itself is unknown the loop never ran and zero_axis is
    // still -1 (writing (*in)[-1] would be out of bounds); a known dim of 0
    // would divide by zero below. Neither case is inferable.
    if (zero_axis == -1 || known_dim_size_prod == 0)
      return false;
    (*in)[zero_axis] = out.Size() / known_dim_size_prod;
    return true;
  }
}
// Shape inference for Reshape. Prefers the `shape` parameter; falls back to
// the deprecated `target_shape`; with neither set, tries to infer the input
// shape backwards from a known output shape.
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
// Deprecated path: in target_shape a 0 entry means "infer this dim".
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
// Set to 1 first so oshape.Size() is the product of the other dims.
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
// No shape spec at all: only a fully known output can constrain the input.
return shape_is_known((*out_attrs)[0])
&& ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
/*!
 * \brief Shape inference for Flatten: (d0, d1, ..., dk) -> (d0, d1*...*dk).
 * \return true once the input shape is fully known.
 */
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  // Accumulate in dim_t (64-bit) rather than int: the product of trailing
  // dims can exceed INT_MAX for large tensors.
  dim_t target_dim = 1;
  for (int i = 1; i < dshape.ndim(); ++i) {
    target_dim *= dshape[i];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
  return true;
}
// Operator parameters for Transpose.
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;  // target axis order; empty default means "reverse all axes"
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
// Field-wise equality; presumably used for cached-op / parameter comparison.
bool operator==(const TransposeParam &other) const {
return this->axes == other.axes;
}
};
/*!
 * \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
 * \param in input tensor
 * \param out output tensor
 * \param row shape of dim 0 of input
 * \param col shape of dim 1 of input
 * \tparam DType Data type
 * \tparam is_addto if true, accumulate into `out` instead of overwriting it
 */
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
// ensure cache line hits and prevent cache miss for any configuration
// L1 cache size to be utilized = 32kb = 2^15
// Largest size of a single unit of any dtype <= 8 byte = 2^3
// Number of elements - (2^15/2^3) = 2^12
// Block-size - 2^6 v 2^6 (64 v 64)
// But we could leverage unrolling of for loops (for parallelization)
// Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled
// blocksize * blocksize * num_threads = cache_size / dtype_size
// Instead of explicit unroll, let compiler figure out optimal unroll factor
const index_t blocksize = 32;
// collapse 2 parallelizes 2 for loops
// inner 2 for loops aren't parallelized to prevent cache miss
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
for (index_t i = 0; i < row; i += blocksize) {
for (index_t j = 0; j < col; j += blocksize) {
// transpose the block; bounds also clip partial edge blocks
for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
// out is col x row: out[a][b] = in[b][a]
if (!is_addto) {
out[a * row + b] = in[b * col + a];
} else {
out[a * row + b] += in[b * col + a];
}
}
}
}
}
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i) return false;
}
return true;
}
// Core transpose dispatch: picks the fastest available implementation for
// the given axes permutation and dimensionality (up to 6-D).
template<typename xpu, bool is_addto>
void TransposeImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
});
return;
}
#endif
// Special handle the identity case
if (IsIdentityTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
if (!is_addto) {
// Use memcpy to accelerate the speed
Copy(out, in, s);
} else {
// addto: elementwise accumulate via identity kernel with kAddTo request
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
s, ret.Size(), out.dptr_, in.dptr_);
}
});
return;
}
// Handle the general transpose case, dispatched on ndim (2..6)
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 2: {
Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
// cache-blocked CPU kernel
Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
"in GPU has been covered by transpose_pseudo2D."
" Report an issue in Github.";
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<3>());
} else {
out += transpose(in, axes.get<3>());
}
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<4>());
} else {
out += transpose(in, axes.get<4>());
}
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<5>());
} else {
out += transpose(in, axes.get<5>());
}
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<6>());
} else {
out += transpose(in, axes.get<6>());
}
break;
}
default:
LOG(FATAL) << "Transpose support at most 6 dimensions";
break;
}
});
}
// matrix transpose
/*!
 * \brief Forward of transpose. With no `axes` parameter the axis order is
 *        fully reversed; otherwise the (possibly negative) axes are
 *        canonicalized first. Supports kWriteTo and kAddTo.
 */
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
  mxnet::TShape axes;
  if (param.axes.ndim() != 0) {
    axes = common::CanonicalizeAxes(param.axes);
  } else {
    // Default: reverse all axes of the input.
    const int nd = inputs[0].ndim();
    axes = mxnet::TShape(nd, -1);
    for (int k = 0; k < nd; ++k) {
      axes[k] = nd - 1 - k;
    }
  }
  if (req[0] == kAddTo) {
    TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes);
  } else {
    TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes);
  }
}
// Shape inference for transpose: propagates forward (input -> `ret`) and
// backward (output -> `get`) through the axes permutation.
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp))
return false;  // none of the shapes is known
CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);  // inferred input shape
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);  // inferred output shape
if (param.axes.ndim() == 0) {
// Default permutation: reverse the axis order in both directions.
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim()-1-i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[shp.ndim()-1-i] = out_shp[i];
}
} else {
// Explicit permutation: ret[i] = shp[axes[i]] and the inverse for `get`.
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
// Operator parameters for expand_dims.
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
int axis;  // insertion position of the new size-1 axis; negatives count from the end
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
bool operator==(const ExpandDimParam &other) const {
return this->axis == other.axis;
}
// Serialize the parameter into a string dict (e.g. for op recording).
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
// Shape inference for expand_dims: inserts a size-1 axis at `param.axis`,
// and can also infer the input shape backwards from a known output shape.
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) {
return false;
}
int indim = ishape.ndim();
bool unknown_ishape = false;
if (-1 == indim) {
// Input ndim unknown: derive it from the output instead.
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
axis += indim + 1;  // normalize negative axis
}
CHECK(axis >= 0 && axis <= indim)
<< "axis must be in the range [" << -indim << ", " << indim << "] ("
<< param.axis << " provided)";
// Forward: copy input dims around the inserted size-1 axis.
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis+1; i < indim+1; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i-1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
// Backward: drop the inserted axis from the output shape.
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
  const int nstep = param.step.ndim();
  for (int axis = 0; axis < nstep; ++axis) {
    const auto& s = param.step[axis];
    if (s.has_value() && s.value() != 1) {
      return false;
    }
  }
  // An empty step tuple falls through the loop and is supported.
  return true;
}
// Storage-type inference for the Slice forward pass: dense input may use the
// MKLDNN path, CSR input is kept sparse only when the step is trivial (1).
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
const auto& in_stype = in_attrs->at(0);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const auto dispatch_ex = DispatchMode::kFComputeEx;
// If step = 1, no need to fallback; otherwise fallback to dense
bool trivial_step = false;
if (param.step.ndim() == 0U) {
trivial_step = true;
} else if (param.step.ndim() == 1U
&& (!param.step[0].has_value() || param.step[0].value() == 1)) {
trivial_step = true;
}
if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
// Prefer the MKLDNN (FComputeEx) path on CPU when allowed by env and step.
if (dev_mask == Context::kCPU && MKLDNNEnvSet()
&& SupportMKLDNNSlice(param)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, dispatch_ex);
}
#endif
if (!dispatched) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
}
// CSR in -> CSR out, but only for trivial steps.
if (!dispatched && in_stype == kCSRStorage && trivial_step) {
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
// slice the indptr of a csr
// Kernel: out[i] = in[i] - *base, rebasing the sliced indptr to start at 0.
struct SliceCsrIndPtr {
template<typename IType>
MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
}
};
/*
 * a wrapper to launch SliceCsrIndPtr kernel.
 * slice [src[begin] .. src[end]) and store in dst[0, end - begin)
 */
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
const IType* src, IType* dst) {
using namespace mshadow;
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
int indptr_len = end - begin + 1;
// src + begin is passed twice on purpose: once as the data to copy and once
// as the base pointer that every entry is rebased against.
Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension
 * Copies the row range [begin[0], end[0]) of `in` into `out`: rebased indptr
 * first, then the matching spans of indices and data.
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out) {
using namespace mshadow;
using namespace mxnet_op;
using namespace csr;
nnvm::dim_t begin_row = begin[0];
nnvm::dim_t end_row = end[0];
nnvm::dim_t indptr_len = end_row - begin_row + 1;
out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
// assume idx indptr share the same type
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
Stream<xpu> *s = ctx.get_stream<xpu>();
// nnz of the slice = last entry of the rebased output indptr
// (copied to host so it can size the allocations below).
RType nnz = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
// return csr zeros if nnz = 0
if (nnz == 0) {
out.set_aux_shape(kIdx, Shape1(0));
return;
}
// copy indices and values
out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
out.CheckAndAllocData(Shape1(nnz));
IType* in_idx = in.aux_data(kIdx).dptr<IType>();
IType* out_idx = out.aux_data(kIdx).dptr<IType>();
DType* in_data = in.data().dptr<DType>();
DType* out_data = out.data().dptr<DType>();
// offset = first nonzero of the sliced rows in the input arrays
RType offset = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
});
});
});
}
/*!
 * \brief slice a CSRNDArray for two dimensions
 */
struct SliceDimTwoCsrAssign {
/*!
 * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
 * \param i loop index (row index)
 * \param out_idx output csr ndarray column indices
 * \param out_data output csr ndarray data
 * \param out_indptr output csr ndarray row index pointer
 * \param in_idx input csr ndarray column indices
 * \param in_data input csr ndarray data
 * \param in_indptr input csr ndarray row index pointer
 * \param begin_col begin column index (inclusive)
 * \param end_col end column index (exclusive)
 */
template<typename IType, typename RType, typename DType>
MSHADOW_XINLINE static void Map(int i,
IType* out_idx, DType* out_data,
const RType* out_indptr,
const IType* in_idx, const DType* in_data,
const RType* in_indptr,
const int begin_col, const int end_col) {
RType ind = out_indptr[i];  // write cursor for row i in the output
for (RType j = in_indptr[i]; j < in_indptr[i+1]; j++) {
// indices of CSRNDArray are in ascending order per row
if (in_idx[j] >= end_col) {
break;
} else if (in_idx[j] >= begin_col) {
// keep the element, rebasing its column index to the slice
out_idx[ind] = in_idx[j] - begin_col;
out_data[ind] = in_data[j];
ind++;
}
}
}
};
/*
* Slice a CSR NDArray for two dimensions
*/
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out);
template<typename xpu>
void SliceCsrImpl(const SliceParam ¶m, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
/*!
 * \brief Forward of Slice for non-default storage. Only CSR inputs are
 *        implemented; anything else aborts.
 */
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  const auto in_stype = inputs[0].storage_type();
  if (in_stype != kCSRStorage) {
    LOG(FATAL) << "Slice not implemented for storage type" << in_stype;
  } else {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  }
}
// Normalize the user-supplied begin/end/step tuples into per-axis
// (begin, end, step) triples over all `ndim` axes. Negative indices are
// wrapped, out-of-range values are clamped, and axes not mentioned in the
// param tuples default to the full range with step 1.
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
common::StaticArray<index_t, ndim>* begin,
common::StaticArray<index_t, ndim>* end,
common::StaticArray<index_t, ndim>* step) {
// Function returns false if output is zero-sized, true otherwise.
bool zero_size_shape = false;
CHECK_NE(dshape.ndim(), 0U);
CHECK_LE(param_begin.ndim(), dshape.ndim())
<< "Slicing axis exceeds data dimensions";
CHECK_LE(param_end.ndim(), dshape.ndim())
<< "Slicing axis exceeds data dimensions";
CHECK_EQ(param_begin.ndim(), param_end.ndim())
<< "begin and end must have the same length";
CHECK_EQ(ndim, dshape.ndim())
<< "Static array size=" << ndim
<< " is not equal to data shape ndim=" << dshape.ndim();
if (param_step.ndim() > 0) {
CHECK_EQ(param_step.ndim(), param_begin.ndim())
<< "step and begin must have the same length";
}
for (int i = 0; i < param_begin.ndim(); ++i) {
// Missing step means 1.
index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
index_t b = 0, e = 0;
const index_t len = dshape[i];
if (len > 0) {
// Missing begin/end default to the full range in the step's direction.
b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
if (b < 0) {
b += len;
}
if (e < 0 && param_end[i].has_value()) {
e += len;
}
// move the begin and end to correct position for calculating dim size
b = (b < 0 && s > 0) ? 0 : b;
b = (b > len - 1 && s < 0) ? len - 1 : b;
// if the start value lead to empty tensor under step s, use -1 for indication
b = (b < 0 || b > len - 1) ? -1 : b;
e = e > -1 ? e : -1;
e = e > len ? len : e;
} else if (len == 0) {
b = 0;
e = 0;
}
(*begin)[i] = b;
(*end)[i] = e;
(*step)[i] = s;
// checking begin==end
if (b == e) {
zero_size_shape = true;
}
}
// Axes beyond the param tuples take the whole extent.
for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
(*begin)[i] = 0;
(*end)[i] = dshape[i];
(*step)[i] = 1;
}
return zero_size_shape;
}
/*!
 * \brief Write the output extent of axis `i` for a slice [b, e) with step `s`
 *        into `oshape`. Unknown input dims propagate as -1; an empty or
 *        invalid range yields 0.
 */
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  if (!mxnet::dim_size_is_known(dshape, i)) {
    (*oshape)[i] = -1;
    return;
  }
  index_t extent = 0;
  if (b >= 0 && e != b) {
    if (s > 0 && e > b) {
      extent = (e - b - 1) / s + 1;      // ceil((e - b) / s)
    } else if (s < 0 && e < b) {
      extent = (b - e - 1) / (-s) + 1;   // ceil((b - e) / -s)
    }
  }
  (*oshape)[i] = extent;
}
// Shape inference for Slice: derives each output dim from the normalized
// (begin, end, step) triple of the corresponding input axis.
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
// Start from the input shape; only the sliced axes are overwritten below.
mxnet::TShape oshape = dshape;
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
}
})
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(dshape) && shape_is_known(oshape);
}
// Gather kernel for Slice forward; specialized per device below.
template<int ndim, int req, typename xpu>
struct slice_forward;
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
// GPU variant: `i` indexes individual OUTPUT ELEMENTS (the launcher
// multiplies the row count by the last-dim size), unlike the cpu variant
// where `i` is a flattened row.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim-1];
const index_t out_last_dim_size = oshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
const index_t j = i % out_last_dim_size;  // position within the last dim
index_t irow = 0;  // row id of flattend 2D data
index_t stride = 1;
index_t idx = i / out_last_dim_size;  // flattened output row
// Decompose the output row into per-axis coordinates (last axis excluded)
// and map each onto the input via begin/step.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[i], req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim-1];
const index_t out_last_dim_size = oshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
index_t out_offset = i * out_last_dim_size;
// One thread copies a whole output row (better locality on cpu).
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0;  // row id of flattend 2D data
index_t stride = 1;
index_t idx = i;
// Decompose the output row into per-axis coordinates (last axis excluded)
// and map each onto the input via begin/step.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[out_offset++], req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
}
};
// Forward of Slice on dense tensors: launches the slice_forward gather
// kernel with per-axis (begin, step) computed by GetIndexRange.
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp) return;
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
if (out.Size() == 0) return;  // empty slice: nothing to do
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
// cpu: one thread per output row; gpu: one thread per output element.
size_t num_threads = out.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= out.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
out.dptr<DType>(), data.dptr<DType>(),
data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
})
})
})
}
// Scatter kernel for slice-assign / slice backward; specialized per device.
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
index_t offset = i * out_last_dim_size;  // read cursor into val
// One thread scatters a whole value row into the sliced positions of out.
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0;  // row id of flattend 2D out
index_t stride = 1;
index_t idx = i;
// Decompose the value row into per-axis coordinates (last axis excluded)
// and map each onto the destination via begin/step.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
req, val[offset++]);
}
}
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
// GPU variant: `i` indexes individual VALUE ELEMENTS (the launcher
// multiplies the row count by the last-dim size), unlike the cpu variant
// where `i` is a flattened row.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
const index_t j = i % out_last_dim_size;  // position within the last dim
index_t irow = 0;  // row id of flattend 2D out
index_t stride = 1;
index_t idx = i / out_last_dim_size;  // flattened value row
// Decompose the value row into per-axis coordinates (last axis excluded)
// and map each onto the destination via begin/step.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
req, val[i]);
}
};
/*!
 * \brief Backward of Slice: scatter `ograd` back into the sliced positions of
 *        `igrad`. kWriteTo zero-fills `igrad` first (positions outside the
 *        slice get no gradient); kAddTo accumulates.
 */
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // size_t (not int), matching SliceOpForward: on gpu the row count is
        // multiplied by the last-dim size and could overflow an int.
        size_t num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Shape inference for _slice_assign: the value (rhs) shape is derived
 *        from the data shape and the slice spec; the output shape equals the
 *        data shape.
 */
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      // index_t (not int), matching SliceOpShape: int would truncate large dims.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Forward of _slice_assign (data[slice] = val): copies `data` into `out`
// (unless operating in place), then scatters `val` into the sliced region.
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
CHECK_EQ(outputs.size(), 1U);
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& val = inputs[1];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
// Seed the output with a full copy of data; the slice is overwritten below.
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
}
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
&begin, &end, &step);
if (zero_size_shape) {
return;  // slice_assign of zero-sized subspace needs no operation.
}
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
// cpu: one thread per value row; gpu: one thread per value element.
int num_threads = val.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= val.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
out.dptr<DType>(), val.dptr<DType>(),
out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
})
})
})
}
/*!
 * \brief Parameters of _slice_assign_scalar: the scalar value to write plus
 * the begin/end/step tuples that describe the destination slice.
 */
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;  // value assigned to every element of the slice
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(0)
    .describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin)
    .describe("starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end)
    .describe("ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
    .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
    .describe("step for the slice operation, supports negative values.");
  }
};
/*!
 * \brief Shape inference for _slice_assign_scalar: the output shape is simply
 * the (fully known) input data shape.
 */
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& data_shape = in_attrs->at(0);
  if (shape_is_known(data_shape)) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, data_shape);
    return true;
  }
  return false;
}
/*!
 * \brief Kernel that writes one scalar into a strided slice of `out`.
 * The slice's value-shape `vshape` is treated as a 2D tensor (rows x last dim);
 * each kernel instance handles one row and loops over the last dimension.
 */
template<int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening out into 2D tensor
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim-1];
    const index_t out_last_dim_size = vshape[ndim-1];
    const index_t step_last_dim = step[ndim-1];
    const index_t begin_last_dim = begin[ndim-1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx = i;
      // Decompose row index i into vshape coordinates (excluding the last dim)
      // and map each coordinate through begin/step into the output row index.
      #pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      // KERNEL_ASSIGN honors the request type (write vs add).
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
    }
  }
};
/*!
 * \brief Forward of _slice_assign_scalar: data[begin:end:step] = scalar.
 * Copies the input to the output (for kWriteTo), then overwrites the sliced
 * region with the scalar. Supports kWriteTo and kWriteInplace only.
 */
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Replicate the whole input first; the kernel below touches only the slice.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  mxnet::TShape vshape = data.shape_;  // shape of the assigned sub-region
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
                                         &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      // Keep index_t width: narrowing to int truncates indices on
      // large-tensor (64-bit index) builds.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
          out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
          out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
    })
  })
}
/*! \brief Parameters of slice_axis: a single axis plus begin and optional end. */
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;                     // axis to slice along; may be negative
  index_t begin;                // inclusive start index; may be negative
  dmlc::optional<index_t> end;  // exclusive end; unset slices to the axis end
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin)
      .describe("The beginning index along the axis to be sliced, "
                " supports negative indexes.");
    DMLC_DECLARE_FIELD(end)
      .describe("The ending index along the axis to be sliced, "
                " supports negative indexes.");
  }
};
/*!
 * \brief Resolve slice_axis parameters against a concrete input shape.
 * Normalizes a possibly-negative axis/begin/end into non-negative values and
 * validates them. For a zero-sized axis both begin and end become 0.
 */
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
                               int* axis, index_t* begin, index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();  // negative axis counts from the back
  }
  CHECK(*axis < ishape.ndim() && *axis >= 0) <<
    "Transformed axis must be smaller than the source ndim and non-negative! Received axis=" <<
    param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;  // unset end means "slice to the end of the axis"
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end))
      << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  } else {
    // Zero-sized axis: the only valid slice is the empty one.
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0)
    << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
/*!
 * \brief Shape inference for slice_axis: the output keeps every input
 * dimension except the sliced axis, which shrinks to (end - begin).
 */
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) return false;
  int axis = 0;
  index_t begin = 0, end = 0;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // Sliced dimension unknown: propagate the input shape but report failure.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  // Copy the input shape and overwrite only the sliced dimension.
  mxnet::TShape oshape(ishape);
  oshape[axis] = static_cast<index_t>(end - begin);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Forward of slice_axis using mshadow's slice<> expression.
 * The input is flattened so the sliced axis becomes a dedicated dimension:
 * last axis -> 2D view, otherwise -> 3D view with the axis in the middle.
 */
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Slicing the last axis: a 2D view (rows x last-dim) is enough.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> in =
          inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> out =
          outputs[0].FlatTo2D<xpu, DType>(s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  } else {
    // General case: 3D view around the sliced axis, slice along dim 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> in =
          inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> out =
          outputs[0].FlatTo3D<xpu, DType>(axis, s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  }
}
// Backward pass of slice_axis: scatter the output gradient back into the
// sliced region of the input gradient (the rest is zeroed for kWriteTo).
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;  // nothing to write for a zero-sized gradient
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    // Last-axis case: 2D views, slice along dim 1.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> ograd =
          inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> igrad =
          outputs[0].FlatTo2D<xpu, DType>(s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        // Zero everything first, then fill the sliced region.
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  } else {
    // General case: 3D views with the sliced axis in the middle.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> ograd =
          inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> igrad =
          outputs[0].FlatTo3D<xpu, DType>(axis, s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  }
}
/*! \brief Parameters of slice_like: which axes follow the second input's sizes. */
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;  // empty tuple means "slice on all axes"
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
    .describe("List of axes on which input data will be sliced according to the "
              "corresponding size of the second input. By default will slice on "
              "all axes. Negative axes are supported.");
  }
};
/*!
 * \brief Shape inference for slice_like.
 * The output takes the first input's shape, with the selected axes (or all
 * axes when none are given) replaced by the second input's sizes. Each sliced
 * axis of the first input must be at least as large as the second input's.
 */
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) {
    return false;
  }
  if (param.axes.ndim() == 0) {
    // No axes given: slice every axis, so both inputs must have the same ndim.
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_axis performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
/*!
 * \brief Build begin/end/step tuples so that slicing `dshape` with them yields
 * `fshape`'s sizes on the selected axes. Lets slice_like reuse the generic
 * slice forward/backward kernels.
 */
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // No axes given: slice every dimension down to fshape's size.
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    // Only the listed axes are sliced; the others keep unset (full-range) values.
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
/*!
 * \brief Forward of slice_like: slice inputs[0] so the selected axes match
 * inputs[1]'s sizes, by translating to begin/end/step and running the generic
 * slice_forward kernel.
 */
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // One thread per flattened row on CPU; GPU also splits the last dim.
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
            num_threads, out.dptr<DType>(), data.dptr<DType>(),
            data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Backward of slice_like: scatter ograd back into the first input's
 * gradient via the slice_assign kernel. The second input receives no gradient.
 */
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Zero the whole gradient first; the kernel fills only the sliced region.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*! \brief Parameters of clip: the inclusive clamping range [a_min, a_max]. */
struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;  // lower and upper clamp bounds
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min)
    .describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max)
    .describe("Maximum value");
  }
  // Serialize fields to strings, e.g. for caching / op re-creation.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream a_min_s, a_max_s;
    a_min_s << a_min;
    a_max_s << a_max;
    (*dict)["a_min"] = a_min_s.str();
    (*dict)["a_max"] = a_max_s.str();
  }
};
/*!
 * \brief Elementwise clamp kernel: out[i] = datas[i] limited to [a_min, a_max].
 * Values for which neither comparison holds (including NaN, since all
 * comparisons with NaN are false) pass through unchanged.
 */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    DType v = datas[i];
    if (v > a_max) {
      v = static_cast<DType>(a_max);
    } else if (v < a_min) {
      v = static_cast<DType>(a_min);
    }
    out[i] = v;
  }
};
/*!
 * \brief Backward kernel of clip: the gradient passes through where the input
 * was inside [a_min, a_max] and is zeroed where the input was clipped.
 */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType v = datas[i];
    const bool clipped = (v > a_max) || (v < a_min);
    out[i] = clipped ? DType(0) : grad[i];
  }
};
/*!
 * \brief Forward of clip on dense tensors: launches the elementwise clip
 * kernel over all elements. Input and output dtypes must match.
 */
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                                   inputs[0].dptr<DType>(),
                                                   param.a_min, param.a_max);
  });
}
/*!
 * \brief Forward of clip for non-default (sparse) storage types.
 * Requires identical dtype and storage type on input and output, then applies
 * the dense Clip<xpu> computation to the stored values.
 */
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
/*!
 * \brief Backward of clip: inputs[0] is the output gradient, inputs[1] the
 * forward input data; the gradient is zeroed where the data was clipped.
 */
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                   inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
                                   param.a_min, param.a_max);
  });
}
/*!
 * \brief The parameters of the repeat operator include
 * the number of repeating time and axis (optional).
 * The parameters will be later used to deduce the
 * output ndarray shape in bool RepeatShape() function.
 */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  int repeats = 1;           // number of repetitions per element
  dmlc::optional<int> axis;  // unset means "flatten, then repeat"
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats)
      .describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<int>())
      .describe("The axis along which to repeat values."
                " The negative numbers are interpreted counting from the backward."
                " By default, use the flattened input array,"
                " and return a flat output array.");
  }
  // Serialize fields to strings, e.g. for caching / op re-creation.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream repeats_s, axis_s;
    repeats_s << repeats;
    axis_s << axis;
    (*dict)["repeats"] = repeats_s.str();
    (*dict)["axis"] = axis_s.str();
  }
};
/*!
 * \brief Extract and sanity-check the user parameters of operator repeat.
 * `repeats` must be non-negative; when an axis is given it is validated
 * against the input shape (negative axes count from the back).
 */
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (!static_cast<bool>(*axisOpt)) return;  // no axis given, nothing to check
  const int ndims = ishape.ndim();
  int axis = axisOpt->value();
  if (axis < 0) axis += ndims;
  CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
/*!
 * \brief Shape inference for repeat. With an axis, the axis size is multiplied
 * by `repeats`; without one, the output is a flat 1D array of size
 * in.Size() * repeats; zero repeats yield an empty 1D shape.
 */
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) {
    return false;
  }
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  // If 0 repeats, return an empty 1-dim, 0-size array
  if (0 == repeats) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  // If repeats > 0, multiply the size of the corresponding axis by repeats
  if (static_cast<bool>(axisOpt)) {
    int ndims = ishape.ndim();
    int axis = axisOpt.value();
    if (axis < 0) {
      axis += ndims;  // negative axis counts from the back
    }
    mxnet::TShape shape(ishape.ndim(), -1);
    for (int i = 0; i < ishape.ndim(); ++i) {
      if (i == axis) {
        shape[i] = repeats * ishape[i];
      } else {
        shape[i] = ishape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  } else {  // If axis is not input by user, return a flat 1D array of size = in.size*repeats
    mxnet::TShape shape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return shape_is_known(out_attrs->at(0));
}
/*!
 * \brief Type inference for repeat: propagate the dtype in whichever
 * direction is already known (-1 marks an unknown dtype).
 */
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = in_attrs->at(0);
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_attrs->at(0) != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the funcitonality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;  // negative axis counts from the back
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim()+1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    // Dims up to and including `axis` are copied verbatim.
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    // The inserted dim: size 1 in the input view, `repeats` in the broadcast
    // target, so broadcasting replicates each element along the new dim.
    rshape[i] = 1;
    bshape[i] = repeats;
    // Remaining dims are shifted right by one.
    while (i < ishape.ndim()) {
      rshape[i+1] = ishape[i];
      bshape[i+1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {
    // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] = repeats;
    return std::make_pair(rshape, bshape);
  }
}
/*!
 * \brief Forward of repeat, implemented by reshaping input and output and
 * delegating the element replication to BroadcastCompute.
 */
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats) return;  // empty output: nothing to compute
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
      ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // reshaped input tblob (shares the original data pointer)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule: gradients of repeated elements are
 * summed back via a reduction over the repeat dim.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats) return;  // forward produced an empty array: no gradient
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // reshaped output grad tblob (target of the sum-reduction)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (incoming gradient, broadcast-shaped)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
/*! \brief Parameters of tile: the per-dimension repetition counts. */
struct TileParam : public dmlc::Parameter<TileParam> {
  mxnet::Tuple<int> reps;  // repetition count per dim; may differ in length from a.ndim
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps)
      .describe("The number of times for repeating the tensor a. Each dim size of reps"
                " must be a positive integer."
                " If reps has length d, the result will have dimension of max(d, a.ndim);"
                " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
                " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
  // Serialize fields to strings, e.g. for caching / op re-creation.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream reps_s;
    reps_s << reps;
    (*dict)["reps"] = reps_s.str();
  }
};
/*!
 * \brief Shape inference for tile. The output has max(a.ndim, reps.ndim) dims;
 * the trailing dims of ishape and reps are aligned and multiplied pairwise.
 */
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, return an identical input array
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  // Walk both from the back; the shorter one implicitly pads with 1's in front.
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else if (i1 >= 0) {
      oshape[i] = ishape[i1--];
    } else if (i2 >= 0) {
      oshape[i] = reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Type inference for tile: propagate the dtype in whichever direction
 * is already known (-1 marks an unknown dtype).
 */
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = in_attrs->at(0);
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_attrs->at(0) != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator tile.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
    const mxnet::TShape& ishape,
    const mxnet::Tuple<int>& reps) {
  if (reps.ndim() == 0) {
    return std::make_pair(ishape, ishape);  // empty reps: identity
  }
  // The shape we want to broadcast to
  mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
  // The shape of the input tensor after adding new axes before each dim
  mxnet::TShape rshape(bshape.ndim(), 1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  // Interleave (rep, dim) pairs from the back: even positions carry the
  // repetition factor (size 1 in rshape), odd positions the original dim.
  for (int i = bshape.ndim() - 1; i >= 0; --i) {
    if (0 == (i & 1)) {
      bshape[i] = (i2 >= 0? reps[i2--] : 1);
      rshape[i] = 1;
    } else {
      rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1);
    }
  }
  return std::make_pair(rshape, bshape);
}
/*!
 * \brief Implementation of tiling the input tensor a based
 * on the user-input shape, reps.
 * If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
 * the input tensor has shape (3,), and the reps is (2, 4); the input
 * tensor would be reshaped to (1, 3).
 * If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
 * the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
 * the reps would be changed to (1, 1, 2, 2).
 * Suppose we have a.ndim = reps.ndim now. To achieve tiling,
 * we utilize the operator broadcast_to. For example, for a tensor
 * of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
 * the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
 * one axis before each dimension. Then, we want to broadcast
 * the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
 * output tensor would have shape (2*2, 8*3, 9*4, 3*5).
 */
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;  // zero-size input: nothing to tile
  const mxnet::TShape& ishape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
  // reshaped input tblob (shares the original data pointer)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule: gradients of tiled copies are summed
 * back via a reduction over the repetition dims.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;  // zero-size gradient: nothing to do
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
  // reshaped output grad tblob (target of the sum-reduction)
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob (incoming gradient, broadcast-shaped)
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
/*! \brief Parameters of the reverse operator. */
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  mxnet::Tuple<int> axis;  // axes whose elements are reversed
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis)
    .describe("The axis which to reverse elements.");
  }
};
#define REVERSE_MAX_DIM 10U
/*! \brief Kernel that writes each input element to its mirrored position. */
struct reverse {
  /*!
   * \brief Translate a flat index into the flat index of the same element
   * after reversing the requested axes.
   * \param idx flat index into the input tensor
   * \param nreversedim number of axes being reversed
   * \param stride_ length of each reversed axis
   * \param trailing_ product of the dimensions trailing each reversed axis
   * \return flat index of the mirrored element
   */
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      // Decompose the flat index around axis i, mirror its coordinate
      // x -> stride_[i] - 1 - x, and recompose.
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template<typename DType>
  __device__ static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                             const index_t * stride_,
                             const index_t * trailing_) {
    // Stage the per-axis tables in shared memory; the first REVERSE_MAX_DIM
    // threads of the block perform the copy.
    // NOTE(review): assumes blockDim.x >= REVERSE_MAX_DIM so the whole
    // table is staged — confirm against the kernel launch configuration.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                  const index_t * stride_,
                                  const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
/*!
 * \brief Forward pass of the reverse operator: copies the input into the
 * output with every axis listed in param.axis mirrored.
 */
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  // The CUDA kernel stages at most REVERSE_MAX_DIM per-axis entries.
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const mxnet::TShape& ishape = inputs[0].shape_;
  // For each reversed axis record its length (stride_) and the number of
  // elements spanned by one step along it (trailing_).
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  for (int axis : param.axis) {
    // NOTE(review): only the upper bound is checked; assumes negative axes
    // were normalized before reaching here — confirm with callers.
    CHECK_LT(axis, ishape.ndim());
    stride_[reverse_index] = ishape[axis];
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }
#ifdef __CUDACC__
  // Copy both host-side tables into a device workspace for the GPU kernel.
  mshadow::Tensor<xpu, 1, uint8_t> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
      mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
  cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
      inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
      reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
      inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
      stride_.data(), trailing_.data());
  });
#endif
}
/*! \brief Parameters of the stack operator. */
struct StackParam : public dmlc::Parameter<StackParam> {
  int axis;
  int num_args;
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(0)
    .describe("The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
    .describe("Number of inputs to be stacked.");
  }
  // Export the parameter values for operator-attribute recording.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    (*dict)["axis"] = std::to_string(axis);
    (*dict)["num_args"] = std::to_string(num_args);
  }
};
/*!
 * \brief Infer the output shape of the stack operator.
 *
 * All inputs must agree on one common shape; the output gains one extra
 * axis of length num_args inserted at position param.axis.
 *
 * \return true when the output shape is fully known
 */
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  mxnet::TShape dshape;
  // Unify all input shapes into one.
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  // Normalize against the *output* rank so axis == dshape.ndim() is valid.
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  // Use int (matching the first loop and oshape.ndim()) instead of the
  // original index_t, avoiding a signed/unsigned comparison.
  for (int i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i-1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Forward pass of the stack operator: concatenate the inputs along
 * a fresh axis of length inputs.size().
 */
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
    // Collapse the output to (before-axis, stacked-axis, after-axis).
    size_t before = 1, after = 1;
    const int out_ndim = outputs[0].ndim();
    for (int d = 0; d < out_ndim; ++d) {
      if (d < axis) {
        before *= outputs[0].shape_[d];
      } else if (d > axis) {
        after *= outputs[0].shape_[d];
      }
    }
    const size_t num_stacked = outputs[0].shape_[axis];
    Tensor<xpu, 3, DType> out =
        outputs[0].get_with_shape<xpu, 3, DType>(
            Shape3(before, num_stacked, after), s);
    // Each input occupies a length-1 slice along the stacked axis.
    std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
    const Shape<3> slice_shape = Shape3(before, 1, after);
    for (size_t i = 0; i < inputs.size(); ++i) {
      data[i] = inputs[i].get_with_shape<xpu, 3, DType>(slice_shape, s);
    }
    Concatenate(data, &out, 1, req[0]);
  })
}
/*!
 * \brief Backward pass of the stack operator: split the output gradient
 * along the stacked axis into one slice per input gradient.
 */
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
    // Collapse the incoming gradient to (before-axis, stacked-axis, after-axis).
    size_t before = 1, after = 1;
    const int grad_ndim = inputs[0].ndim();
    for (int d = 0; d < grad_ndim; ++d) {
      if (d < axis) {
        before *= inputs[0].shape_[d];
      } else if (d > axis) {
        after *= inputs[0].shape_[d];
      }
    }
    const size_t num_stacked = inputs[0].shape_[axis];
    Tensor<xpu, 3, DType> grad =
        inputs[0].get_with_shape<xpu, 3, DType>(
            Shape3(before, num_stacked, after), s);
    // One length-1 slice per input gradient.
    std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
    const Shape<3> slice_shape = Shape3(before, 1, after);
    for (size_t i = 0; i < outputs.size(); ++i) {
      grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(slice_shape, s);
    }
    Split(grad, &grad_in, 1, req);
  })
}
/*! \brief Parameters of the squeeze operator. */
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
  dmlc::optional<mxnet::Tuple<int>> axis;  // unset means squeeze all size-1 axes
  DMLC_DECLARE_PARAMETER(SqueezeParam) {
    DMLC_DECLARE_FIELD(axis)
    .set_default(dmlc::optional<mxnet::Tuple<int>>())
    .describe("Selects a subset of the single-dimensional entries in the shape."
              " If an axis is selected with shape entry greater than one, an error is raised.");
  }
  // Export the parameter values for operator-attribute recording.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s;
    axis_s << axis;
    (*dict)["axis"] = axis_s.str();
  }
};
// Given a shape whose to-be-removed axes have been marked with -1,
// move all the -1 entries to the end of the shape array while keeping
// the relative order of the remaining (kept) values.
// (The old comment spoke of "zeros"; the sentinel used below is -1.)
// Returns the number of kept dimensions, i.e. the squeezed shape size.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  size_t count = 0;  // number of -1 sentinels seen so far
  for (int i = 0; i < shape->ndim(); ++i) {
    if ((*shape)[i] == -1) {
      ++count;
    } else {
      // Shift kept entries left over the removed ones.
      std::swap((*shape)[i], (*shape)[i-count]);
    }
  }
  return shape->ndim() - count;
}
/*!
 * \brief Infer the output shape of squeeze.
 *
 * Marks every axis to remove with -1 (either the user-given axes, each of
 * which must have size 1, or all size-1 axes when no axis is given),
 * compacts the shape via SqueezeShapeHelper, and keeps at least one
 * dimension for the all-ones corner case.
 */
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  const int dndim = dshape.ndim();
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape = dshape;
  if (param.axis.has_value()) {
    // preprocess axis
    mxnet::Tuple<int> axes = param.axis.value();
    for (int i = 0; i < axes.ndim(); ++i) {
      if (axes[i] < 0) {
        axes[i] += dndim;  // normalize negative axes
        CHECK_GE(axes[i], 0)
          << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
      }
      CHECK_LT(axes[i], dndim)
        << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
      CHECK_EQ(dshape[axes[i]], 1)
        << "cannot select an axis to squeeze out which has size="
        << dshape[axes[i]] << " not equal to one";
      CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
      oshape[axes[i]] = -1;  // mark this axis for removal
    }
  } else {
    // No axes given: squeeze every size-1 dimension.
    for (int i = 0; i < oshape.ndim(); ++i) {
      if (oshape[i] == 1) oshape[i] = -1;
    }
  }
  size_t oshape_size = SqueezeShapeHelper(&oshape);
  if (oshape_size == 0) {  // corner case when dshape is (1, 1, 1, 1)
    oshape[0] = 1;
    oshape_size = 1;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
  return true;
}
/*! \brief Parameters shared by depth_to_space and space_to_depth. */
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  int block_size;
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
    // Fixed typo in the user-facing description: "block_size. block_size".
    .describe("Blocks of [block_size, block_size] are moved");
  }
};
/*!
 * \brief Infer the output shape of depth_to_space.
 *
 * Input must be a 4D tensor; the output shape is
 * (dim0, dim1 / block^2, dim2 * block, dim3 * block).
 *
 * \return true when the output shape is fully known
 */
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  int block = param.block_size;
  // Reject non-positive block sizes; the previous CHECK_NE(block, 0) let
  // negative values through despite the message demanding a positive value.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);
  // Spatial dimensions grow by the block factor.
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] * block;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
/*! \brief Infer dtype for depth_to_space: output dtype equals input dtype. */
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  // Propagate the type in both directions so either side may be the known one.
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
/*!
 * \brief Peel one coordinate off a running flat index and fold the
 * corresponding input-tensor offset into the accumulated input index.
 * \param index_position index within offset_arr giving this dimension's stride
 * \param dim_size size of the current dimension
 * \param idx in/out: flat output index; divided by dim_size on return
 * \param inp_index in/out: accumulated flat index into the input tensor
 * \param offset_arr array of linear offsets (strides) of the input tensor
 */
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  const index_t quotient = *idx / dim_size;
  const index_t coordinate = *idx - quotient * dim_size;  // == *idx % dim_size
  *inp_index += coordinate * offset_arr[index_position];
  *idx = quotient;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct depth_to_space_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
                                  const int block, const index_t* size, const index_t* offset_arr) {
    // Peel the output coordinates off idx one dimension at a time
    // (fastest-varying first); each update_index call folds the matching
    // input stride from offset_arr into inp_index. The call order encodes
    // the transpose above, so it must not be reordered.
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[3];
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2];
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1] / (block * block);
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing depth_to_space operation
 * \param i global thread id (a single thread fills the tables; i is unused)
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_depth_to_space {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1, const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides consumed by update_index, built innermost (stride 1) outward.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};
/*!
 * \brief Forward pass of depth_to_space.
 *
 * A 10-element index_t workspace carries the six linear offsets and the
 * four input-dimension sizes used by the index-remapping kernel.
 */
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  const int block = param.block_size;
  // Workspace layout: entries [0, 6) hold offsets, [6, 10) hold sizes.
  mshadow::Tensor<xpu, 1, char> scratch =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  index_t* offset_arr = reinterpret_cast<index_t*>(scratch.dptr_);
  index_t* size = reinterpret_cast<index_t*>(scratch.dptr_ + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // A one-thread kernel fills the offset/size tables device-side.
      Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}
/*!
 * \brief Infer the output shape of space_to_depth.
 *
 * Input must be a 4D tensor; the output shape is
 * (dim0, dim1 * block^2, dim2 / block, dim3 / block).
 *
 * \return true when the output shape is fully known
 */
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  int block = param.block_size;
  // Reject non-positive block sizes; the previous CHECK_NE(block, 0) let
  // negative values through despite the message demanding a positive value.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // Error text below previously said "Depth To Space"; this operator is
  // Space To Depth.
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;
  // Spatial dimensions shrink by the block factor.
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] / block;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
/*! \brief Infer dtype for space_to_depth: output dtype equals input dtype. */
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  // Propagate the type in both directions so either side may be the known one.
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
/*!
 * \brief This function preforms the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
 * to the ith index of output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template<int req>
struct space_to_depth_forward {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
                                  const index_t* size, const index_t* offset_arr) {
    // Peel the output coordinates off idx one dimension at a time
    // (fastest-varying first); each update_index call folds the matching
    // input stride from offset_arr into inp_index. The call order encodes
    // the transpose above, so it must not be reordered.
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
 * \brief This function calculates the linear offset for each dimension of
 * input tensor and stores them in an array, which is later used in
 * performing space_to_depth operation
 * \param i global thread id (a single thread fills the tables; i is unused)
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template<int req>
struct compute_offset_for_space_to_depth {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides consumed by update_index, built innermost (stride 1) outward.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};
/*!
 * \brief Forward pass of space_to_depth.
 *
 * A 10-element index_t workspace carries the six linear offsets and the
 * four input-dimension sizes used by the index-remapping kernel.
 */
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  const int block = param.block_size;
  // Workspace layout: entries [0, 6) hold offsets, [6, 10) hold sizes.
  mshadow::Tensor<xpu, 1, char> scratch =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  index_t* offset_arr = reinterpret_cast<index_t*>(scratch.dptr_);
  index_t* size = reinterpret_cast<index_t*>(scratch.dptr_ + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // A one-thread kernel fills the offset/size tables device-side.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
          s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
          in_data.shape_[2], in_data.shape_[3]);
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
          s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
          block, size, offset_arr);
    });
  });
}
namespace split_enum {
// Symbolic index of the single data input to the split operator.
enum SplitOpInputs {kData};
}  // namespace split_enum
/*! \brief Parameters of the split operator. */
struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;  // explicit split boundaries (used when sections == 0)
  int axis;
  bool squeeze_axis;
  int sections;  // > 0 selects equal-section splitting instead of indices
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries of at which split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.")
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
  // Export the parameter values for operator-attribute recording.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s;
    indices_s << indices;
    axis_s << axis;
    squeeze_axis_s << squeeze_axis;
    sections_s << sections;
    (*dict)["indices"] = indices_s.str();
    (*dict)["axis"] = axis_s.str();
    (*dict)["squeeze_axis"] = squeeze_axis_s.str();
    (*dict)["sections"] = sections_s.str();
  }
};  // struct SplitParam
/*!
 * \brief Compute the boundary indices that divide axis `axis` of `ishape`
 * into `sections` near-equal parts: the first `ishape[axis] % sections`
 * parts receive one extra element.
 * \return a TShape of sections + 1 cumulative boundaries, starting at 0
 */
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  mxnet::TShape indices(sections+1, -1);
  const int64_t base_size = static_cast<int64_t>(ishape[axis] / sections);
  const int num_larger = ishape[axis] % sections;  // sections of size base_size + 1
  indices[0] = 0;
  // Cumulative sum of the per-section lengths.
  for (int i = 0; i < sections; ++i) {
    indices[i+1] = indices[i] + (i < num_larger ? base_size + 1 : base_size);
  }
  return indices;
}
/*! \brief Infer dtypes for split: every output inherits the input's dtype. */
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // One output per section, all carrying the input's dtype.
  out_attrs->assign(num_outputs, dtype);
  return true;
}
/*!
 * \brief Shared shape-inference logic for split, given a normalized
 * (non-negative) axis.
 *
 * Assigns every output its section shape (optionally squeezing the split
 * axis) and then back-computes the input shape from the outputs so that
 * inference can also flow backwards.
 */
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  // Boundaries: derived for section splits, user-given for index splits.
  const mxnet::TShape indices =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage
  mxnet::TShape squeezed_dshape = dshape;
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d+1];
  }
  squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    int start = indices[i];
    // The last section extends to the end of the axis.
    int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      end = start;  // empty axis: every section is empty
    } else {
      CHECK(start <= end)
        << "start " << start << " is not less than end " << end << "for subarray " << i;
      CHECK(end <= ishape[real_axis])
        << "end " << end << " is no less than the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Reconstruct the input shape implied by the (possibly user-set) outputs.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    // The split axis length is the sum of all section lengths.
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}
/*!
 * \brief Infer shapes for the split operator.
 *
 * Validates that param.axis lies in [-ndim, ndim) and delegates to
 * SplitOpShapeImpl with the normalized (non-negative) axis.
 */
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    // A negative axis must still land inside the array after wrapping.
    // The previous check (axis + ndim < ndim) was trivially true for any
    // negative axis and let out-of-range values through.
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator
   * \param i global thread id
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to indices buffer (section start boundaries)
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear scan for the last section whose start boundary is <= idx.
    size_t target = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         target = section++) {}
    DType* target_data = out_data[target];
    // Recompose a flat index inside the target section from the
    // (head, mid, tail) coordinates of element i.
    const size_t mid_idx = idx - indices[target];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[target + 1] - indices[target];
    const size_t target_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};
struct ConcatenateKernel {
  /*!
   * \brief Map function for backward split_v2 operator
   * \param i global thread id
   * \param out_grad ptr to ptr of out grads buffer
   * \param in_grad ptr to input grad buffer
   * \param indices ptr to indices buffer (section start boundaries)
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear scan for the last section whose start boundary is <= idx.
    size_t src = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         src = section++) {}
    DType* src_grad = out_grad[src];
    // Recompose the flat index inside the source section from the
    // (head, mid, tail) coordinates of element i.
    const size_t mid_idx = idx - indices[src];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[src + 1] - indices[src];
    const size_t src_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};
/*!
 * \brief Shared implementation of the forward split operator.
 *
 * Performs the whole split in one kernel launch: each input element is
 * routed to the output section it belongs to. A device workspace holds
 * the section boundary indices followed by the raw output pointers.
 */
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  CHECK_LT(real_axis, input_data.ndim());
  // Length of the split axis and the product of all trailing dimensions.
  // (The original code also computed the leading product, but it was never
  // used — removed as dead code.)
  size_t mid = input_data.shape_[real_axis];
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Index-based splits omit the final boundary; append the axis length.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    // Workspace layout: [boundary indices | raw output pointers].
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(
      s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
/*!
 * \brief Entry point of the forward split operator: validates the output
 * count, normalizes a negative axis, and delegates to SplitOpForwardImpl.
 */
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
  const TBlob& input_data = inputs[split_enum::kData];
  const int real_axis =
      (param.axis < 0) ? param.axis + input_data.ndim() : param.axis;
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
// Backward pass of Split: concatenates the incoming output gradients back
// into the single input gradient along `real_axis`.
// Workspace layout (one char buffer from the resource request):
//   [ indices.size() * sizeof(size_t) | inputs.size() * sizeof(DType*) ]
// i.e. the split-boundary table first, then the device-side pointer table.
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_grad.ndim());
  size_t mid = input_grad.shape_[real_axis];
  // Collapse the dims before/after the split axis into leading/trailing extents.
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_grad.shape_[i];
  }
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  // Split points: derived from `sections` when given, else taken verbatim from the attrs.
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Explicit-indices mode: append the axis extent as the closing boundary.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    // Stage host-side boundary/pointer tables, then copy both into xpu memory.
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
        reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    // One thread per element of the input gradient; the kernel gathers from
    // the correct output-gradient segment using the boundary table.
    Kernel<ConcatenateKernel, xpu>::Launch(
        s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
        indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
// Backward entry point for Split: checks that the number of incoming output
// gradients matches the operator's output count, normalizes a negative axis,
// and hands off to SplitOpBackwardImpl.
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_out_grads =
      (param.sections > 0) ? param.sections : param.indices.ndim();
  CHECK_EQ(inputs.size(), num_out_grads)
    << "out grad vector size mush match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int axis = param.axis;
  if (axis < 0) {
    axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, axis);
}
// Number of outputs produced by Split: `sections` when it is specified,
// otherwise the count of explicit split indices.
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
// std::hash specializations so the parameter structs can key unordered
// containers.
// Fix: operator() is now const-qualified. The standard Hash requirement
// ([hash.requirements]) is that the hasher be invocable on a const object;
// a non-const call operator breaks use through const containers/hashers.
template<>
struct hash<mxnet::op::TransposeParam> {
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ReshapeParam> {
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    // Combine every field that participates in equality so equal params
    // always hash equally.
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ExpandDimParam> {
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
omp_parsec.c | #include <omp.h>
#include <stdio.h>
/* Demonstrates OpenMP worksharing: a parallel-for fill followed by two
 * independent computations run as concurrent sections. */
int main(int argc, char *argv[])
{
    const size_t N = 20;
    int i;
    /* NOTE: in C a const variable is not a constant expression, so these
     * are variable-length arrays. */
    float a[N], b[N], c[N], d[N];
    /* Fill the inputs in parallel; the omp-for loop variable is implicitly
     * private to each thread. */
#pragma omp parallel
    {
#pragma omp for
        for (i = 0; i < N; ++i)
        {
            a[i] = i;
            b[i] = N - i;
        }
    }
    /* Compute the element-wise sum and product concurrently, one section
     * each. `nowait` drops the barrier at the end of the sections construct,
     * but the implicit barrier at the end of the parallel region still
     * guarantees both sections are complete before the prints below. */
#pragma omp parallel shared(a,b,c,d) private(i)
    {
#pragma omp sections nowait
        {
#pragma omp section
            for (i = 0; i < N; ++i)
            {
                c[i] = a[i] + b[i];
            }
#pragma omp section
            for (i = 0; i < N; ++i)
            {
                d[i] = a[i] * b[i];
            }
        }
    }
    /* %1$d-style positional conversions are POSIX, not ISO C. */
    for (i = 0; i < N; ++i)
    {
        printf("c[%1$d] = %2$f, d[%1$d] = %3$f\n", i, c[i], d[i]);
    }
    return 0;
}
a.30.1.c | /* { dg-do compile } */
/* Compiler testcase for `lastprivate`: the value of i from the sequentially
 * last iteration (i == n-1) is copied back to the original variable when the
 * loop ends, so the final array element can be handled after the region. */
void
a30 (int n, float *a, float *b)
{
  int i;
#pragma omp parallel
  {
#pragma omp for lastprivate(i)
    for (i = 0; i < n - 1; i++)
      a[i] = b[i] + b[i + 1];
  }
  a[i] = b[i]; /* i == n-1 here */
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: *y is used as scratch space and may be modified by the call.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds so that x->tv_usec >= y->tv_usec afterwards. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry into seconds when the microsecond gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the 3D grids, runs the order-4 (25-point) variable-
 * coefficient stencil TESTS times, and reports the best wall-clock time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Fix: give the problem dimensions sane defaults. They were previously
   * read uninitialized (undefined behavior) whenever fewer than three
   * (resp. four) command-line arguments were supplied. */
  int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;   /* +8 = 4-deep halo on each side */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays (two time buffers, ping-pong via (t)%2)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // 13 axis-symmetric coefficient arrays
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  srand(42);
  /* Fix: initialize every element of BOTH time buffers. The loops used to
   * start at index 1 and covered only A[0], but the stencil reads planes
   * 0..3 (i-4 == 0 when i == 4) and, from t == 1 on, the halo of A[1] --
   * reading uninitialized memory is undefined behavior. */
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Fix: use the MIN macro defined at the top of this file; the lowercase
     * min() used before is not declared anywhere in this translation unit. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
RendererDeferred.h | #pragma once
#include "pc.h"
#include "RendererBase.h"
class RendererDeferred: public RendererBase {
private:
// Defaults
struct {
union {
float4 black_void1[4];
const float black_void2[4] = { 0.f, 0.f, 0.f, 0.f };
};
union {
float4 white_snow1[4];
const float white_snow2[4] = { 1.f, 1.f, 1.f, 1.f };
};
} s_clear;
struct {
union {
Texture* texture_list[8];
struct {
Texture *checkboard{};
Texture *tile_normal{};
Texture *bluenoise_rg_512{};
Texture *black{};
Texture *white{};
Texture *mgray{}; // Middle gray; 127
Texture *dgray{}; // Dark gray; 63
Texture *lgray{}; // Light gray; 190
} tex{};
} ti{};
struct {
SamplerState point{};
SamplerState linear{};
SamplerState point_comp{};
SamplerState linear_comp{};
} sampl{};
// TODO: Do clustered rendering?
Texture *mCubemap{};
} s_material{};
struct {
struct {
BlendState *normal;
BlendState *add;
BlendState *no_blend;
BlendState *one_one;
} blend;
struct {
// Depth test/write, no stencil
DepthStencilState *normal; // RW; Greater Than | Default
DepthStencilState *norw; // No RW
DepthStencilState *ro; // Read Only; Greater
DepthStencilState *ro_eq; // Read Only; Equal
DepthStencilState *ro_get; // Read Only; Greater Equal Than
DepthStencilState *ro_lt; // Read Only; Less Than
DepthStencilState *ro_let; // Read Only; Less Equal Than
} depth;
struct {
RasterState *normal;
RasterState *wire;
RasterState *normal_scissors;
RasterState *wire_scissors;
RasterState *normal_cfront;
RasterState *normal_cback;
} raster;
} s_states{};
struct {
MeshComponent unit_sphere;
MeshComponent unit_cone;
void ReleaseMesh(MeshComponent& mesh) {
SAFE_RELEASE(mesh.mIndexBuffer);
SAFE_RELEASE(mesh.mVBPosition);
SAFE_RELEASE(mesh.mVBTangent);
SAFE_RELEASE(mesh.mVBTexcoord);
}
void Release() {
ReleaseMesh(unit_sphere);
ReleaseMesh(unit_cone);
}
#define GET_BUFFER(x) ((x) ? (x)->GetBuffer() : nullptr)
#define GET_STRIDE(x) ((x) ? (x)->GetStride() : 0u)
void Bind(const MeshComponent& mesh) {
ID3D11Buffer *buffs[4] = { GET_BUFFER(mesh.mVBPosition), GET_BUFFER(mesh.mVBTexcoord),
GET_BUFFER(mesh.mVBNormal), GET_BUFFER(mesh.mVBTangent) };
UINT strides[4] = { GET_STRIDE(mesh.mVBPosition), GET_STRIDE(mesh.mVBTexcoord),
GET_STRIDE(mesh.mVBNormal), GET_STRIDE(mesh.mVBTangent) };
UINT offsets[4] = { 0, 0, 0, 0 };
gDirectX->gContext->IASetVertexBuffers(0, ARRAYSIZE(buffs), buffs, strides, offsets);
gDirectX->gContext->IASetIndexBuffer(mesh.mIndexBuffer->GetBuffer(), DXGI_FORMAT_R32_UINT, 0);
}
#undef GET_BUFFER
#undef GET_STRIDE
} s_mesh{};
// Deferred renderer
ConstantBuffer *cbDeferredGlobal;
struct DeferredGlobal {
#include "Deferred/Global.h"
};
// Volumetric lights
ConstantBuffer *cbVolumetricSettings;
struct VolumetricSettings {
// fQ = fFar / (fNear - fFar);
float4 _ProjValues; // fNear * fQ, fQ, 1 / m[0][0], 1 / m[1][1] // Player
float4 _ProjValues2; // // Light
float2 _Scaling; // Width, Height / Downscaling Factor
float _GScattering; // [-1; 1]
float _MaxDistance; // 0 - Light Far?
uint _FrameIndex; // 0 -> Interleaved; based on frame index
uint _Interleaved; // pow(2, n)
float _Exposure; // Default: 10.f
uint _Padding;
};
// Debug views
struct DebugDataGBuffer {
uint tUseMipMapLUT;
uint3 align;
};
ConstantBuffer* cbDebugDataGBuffer;
Texture *mMipmappingLerpLUT;
Texture *mMipmappingLUT;
uint tUseMipMapLUT = 0u; // 0 - Disabled, 1 - Lerp with alpha, 2 - MipMaps
// HDR Luma View
struct HDRDebugSettings {
float _LumScale;
uint _UseAvg;
float _MaxLum;
float1 dummy;
};
Texture *mHDRGradationLUT;
ConstantBuffer *cbGradationLUT;
uint bUseHDRLUT = 0u; // 1st bit - UseHDRLUT; 2nd bit - UseAvg lum Per frame
float fHDRLUTScale = 1.9f;
float fHDRLUTMaxLum = 100.f;
// Blur filter
ConstantBuffer *cbBlurFilter;
struct BlurFilter {
// Res of downscaled target: x - width, y - height
uint2 _Res; // Backbuffer / 4
// Total pixels in the downscaled img
uint _Domain; // Res.x * Res.y
// Number of groups dispatched on 1st pass
uint _GroupSize; // Domain / 1024
//
float4 _Alignment;
} gBlurFilter{};
// DSSDO
ConstantBuffer *cbDSSDOSettings;
struct DSSDOSettings {
// fQ = fFar / (fNear - fFar);
float4 _ProjValues; // fNear * fQ, fQ, 1 / m[0][0], 1 / m[1][1] // Player
float2 _Scaling; // Width, Height / Downscaling Factor
uint _FrameIndex; // 0 -> Interleaved; based on frame index
uint _Interleaved; // pow(2, n)
float _OcclusionRadius;
float _OcclusionMaxDistance;
uint2 _Padding;
};
// Effects
HDRPostProcess *gHDRPostProcess{};
SSAOPostProcess *gSSAOPostProcess{};
SSLRPostProcess *gSSLRPostProcess{};
SSLFPostProcess *gSSLFPostProcess{};
CascadeShadowMapping<3, false> *gCascadeShadowMapping{};
//CoverageBuffer *gCoverageBuffer{};
OrderIndendentTransparency *gOrderIndendentTransparency{};
SSLRArgs gSSLRArgs{};
SSAOArgs gSSAOArgs{};
CSMArgs gCSMArgs{};
//CBuffArgs gCBuffArgs{};
OITSettings gOITSettings{};
VolumetricSettings gVolumetricSettings{};
DSSDOSettings gDSSDOSettings{};
// Resources
RenderTarget2DDepthMSAA *rtDepth{};
RenderTarget2DColor5DepthMSAA *rtGBuffer{};
RenderTarget2DColor4DepthMSAA *rtTransparency{}, *rtCombinedGBuffer{};
RenderTarget2DColor1 *rtFinalPass{};
RenderTarget2DColor1DepthMSAA *rtDeferred{};
RenderTarget2DColor1 *rtDeferredAccumulation{};
Texture *mVolumetricLightAccum{};
Texture *mDepth2{}, *mDepthI{}; // Main depth buffer & Intermidiate
Texture *mDSSDOAccumulation{};
Shader *shSurface{}, *shVertexOnly{}, *shGUI{}, *shPostProcess{}, *shCombinationPass{};
Shader *shDeferredPointLights{}, *shDeferredAccumulation{}, *shDeferredSpotLights{};
Shader *shVolumetricLight;
Shader *shSimpleGUI{}, *shHDRView{};
Shader *shHorizontalFilterDepth{}, *shVerticalFilterDepth{};
Shader *shDSSDOAccumulate{};
// Local
Scene *mScene{};
// Transformation
TransformComponent *IdentityTransf;
ConstantBuffer *cbTransform;
//
uint gFrameIndex{};
// ImGui
bool mRenderDoc{};
ImTextureID mRenderDocImageID{};
Texture *mRenderDocTex{};
enum class LitIndex {
Lit, Unlit,
AO, Indirect,
Volumetric,
SSDO,
Normal,
Deferred,
DeferredA, // Accumulation
Shading,
Count
};
bool mLit{};
uint32_t mLitIndex{};
ImTextureID mLitImageID[(uint)LitIndex::Count]{};
Texture *mLitImageTex[(uint)LitIndex::Count]{};
//
template<typename T>
struct LightDescriptor {
uint num;
StructuredBuffer<T>* sb;
};
// Geometry Passes
void Shadows(); // Done
void GBuffer(); // Done
void OIT(); // Done; Optimize sorting; Fix MSAA
// Occlusion tests
void CoverageBuffer();
// Screen-Space Passes
void Deferred();
void DeferredLights(const LightDescriptor<PointLightBuff>&);
void DeferredLights(const LightDescriptor<SpotLightBuff>&);
void SSAO();
void SSLR();
void SSLF();
void FSSSSS();
void Combine();
void HDR();
void VolumetricLight();
void DSSDO();
// Final Passes
void Final();
void BindOrtho();
// Internal mesh loading
// Builds a neutral transform: identity world matrix, zero position and
// rotation, unit scale.
TransformComponent DefaultTransformComp() const {
    TransformComponent t{};
    t.mWorld    = DirectX::XMMatrixIdentity();
    t.vPosition = { 0.f, 0.f, 0.f };
    t.vRotation = { 0.f, 0.f, 0.f };
    t.vScale    = { 1.f, 1.f, 1.f };
    return t;
}
uint32_t mMeshSRV{};
std::vector<MeshComponent> LoadModelExternal(const char* fname, /*ECS* ecs, */uint32_t flags) {
TransformComponent transform = DefaultTransformComp();
// Load model
Assimp::Importer importer;
const aiScene *scene = importer.ReadFile(fname, aiProcess_Triangulate | aiProcess_CalcTangentSpace
| (0*aiProcess_GenSmoothNormals) | flags);
if( !scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode ) {
std::cout << "[Scene::LoadModelExternal]: Can't load model! (" << fname << ")" << std::endl;
return { };
}
// Process scene
std::vector<MeshComponent> mMeshList;
//std::vector<MaterialComponent> mMatList;
//LoaderTextureList mTextureFileList;
uint32_t index = 0u;
ProcessNodeStatic(scene->mRootNode, scene, &mMeshList, /*&mMatList, &mTextureFileList, */index);
// Get current model's directory
/*char drive[_MAX_DRIVE];
char dir[_MAX_DIR];
char fnm[_MAX_FNAME];
char ext[_MAX_EXT];
WCHAR cdir[_MAX_DIR];
GetCurrentDirectory(_MAX_DIR, cdir);
std::string ndir = narrow(cdir) + "/";
_splitpath((ndir + fname).c_str(), drive, dir, fnm, ext);
std::string path = "";
path = drive;
path += dir;
_splitpath((ndir + "../Textures/").c_str(), drive, dir, fnm, ext);
std::string tpath = "";
tpath = drive;
tpath += dir;
// Load textures & attach to materials
for( LoaderTextureList::iterator e = mTextureFileList.begin(); e != mTextureFileList.end(); e++ ) {
LoaderTextureList::mapped_type data = e->second;
std::string fnme = e->first;
Texture *texture = 0;
// Build paths
std::string file1 = path + fnme;
std::string file2 = tpath + fnme;
// Search in current directory
bool flag = false;
if( file_exists(file1) ) {
texture = new Texture(file1, tf_MipMaps*1);
flag = true;
} else if( file_exists(file2) ) {
texture = new Texture(file2, tf_MipMaps*0);
flag = true;
}
if( flag ) {
// Assign texture
switch( data.first ) {
#define MIND(type, tex, ts, v) case type: for( uint32_t mind : data.second ) { mMatList[mind].tex = texture; mMatList[mind].ts = v; } break;
case aiTextureType_NORMALS:
MIND(aiTextureType_HEIGHT, _NormalTex, _Norm, 2);
MIND(aiTextureType_DIFFUSE , _AlbedoTex , _Alb , 2);
MIND(aiTextureType_SHININESS, _RoughnessTex , _Rough, 2);
MIND(aiTextureType_LIGHTMAP , _AmbientOcclusionTex, _AO , 2);
MIND(aiTextureType_EMISSIVE , _EmissionTex , _Emis , 2);
MIND(aiTextureType_UNKNOWN , _MetallicTex , _Metal, 2 | 4);
#undef MIND
}
printf_s("[Scene::ModelLoader]: Loaded texture. [%s]\n", fnme.c_str());
} else {
printf_s("[Scene::ModelLoader]: Failed to load texture! [%s]\n", fnme.c_str());
}
}*/
// Done
return mMeshList;
//if( mMeshList.size() == 1 ) {
// return { ecs->MakeEntity(transform, mMeshList[0], mMatList[0]) };
//}
//
//// Return list of entites
//EntityHandleList list;
//list.reserve(mMeshList.size());
//for( uint32_t i = 0; i < mMeshList.size(); i++ ) {
// list.push_back(ecs->MakeEntity(transform, mMeshList[i], mMatList[i]));
//}
//
//// Done
//return list;
}
// Recursively walks the assimp node tree, converting every referenced mesh
// and appending it to MeshList. `index` numbers meshes in traversal order.
void ProcessNodeStatic(aiNode* node, const aiScene* scene, std::vector<MeshComponent>* MeshList,
                       //std::vector<MaterialComponent>* MatList,
                       //LoaderTextureList* TextureList,
                       uint32_t& index) {
    // Process meshes.
    // Fix: the previous "#pragma omp parallel for" here was a data race --
    // std::vector::push_back and the shared `index` counter are not
    // thread-safe, and meshes must be appended in deterministic order.
    // The loop now runs sequentially.
    for( int32_t i = 0; i < (int32_t)node->mNumMeshes; i++ ) {
        aiMesh* mesh = scene->mMeshes[node->mMeshes[i]];
        MeshList->push_back(ProcessMeshStatic(mesh, scene, /*MatList, TextureList, */index));
        index++;
    }
    // Process children recursively.
    for( size_t i = 0; i < node->mNumChildren; i++ ) {
        ProcessNodeStatic(node->mChildren[i], scene, MeshList, /*MatList, TextureList, */index);
    }
}
MeshComponent ProcessMeshStatic(aiMesh* inMesh, const aiScene* scene,
//std::vector<MaterialComponent>* MatList,
//LoaderTextureList* TextureList,
uint32_t index) {
//MaterialComponent mat = DefaultMaterialComp();
MeshComponent mesh{};
//
std::vector<float3> Position;
std::vector<float2> Texcoord;
std::vector<float3> Normal;
std::vector<float3> Tangent;
std::vector<uint32_t> Index;
// Process vertices
Position.reserve(inMesh->mNumVertices);
Texcoord.reserve(inMesh->mNumVertices);
Tangent.reserve( inMesh->mNumVertices);
Normal.reserve( inMesh->mNumVertices);
for( size_t i = 0; i < inMesh->mNumVertices; i++ ) {
Position.push_back({ inMesh->mVertices[i].x, inMesh->mVertices[i].y, inMesh->mVertices[i].z });
Normal .push_back({ inMesh->mNormals [i].x, inMesh->mNormals [i].y, inMesh->mNormals [i].z });
if( inMesh->mTangents ) {
Tangent.push_back({ inMesh->mTangents[i].x, inMesh->mTangents[i].y, inMesh->mTangents[i].z });
} else {
Tangent.push_back({ 0, 0, 0 });
}
if( inMesh->mTextureCoords[0] ) {
Texcoord.push_back({ inMesh->mTextureCoords[0][i].x, inMesh->mTextureCoords[0][i].y });
} else {
Texcoord.push_back({ 0, 0 });
}
}
// Process indices
uint32_t IndexNum = 0;
Index.reserve(inMesh->mNumFaces * 3); // Assume that each face has at least 3 indices
for( size_t i = 0; i < inMesh->mNumFaces; i++ ) {
aiFace face = inMesh->mFaces[i];
IndexNum += face.mNumIndices;
for( size_t j = 0; j < face.mNumIndices; j++ ) {
Index.push_back(face.mIndices[j]);
}
}
// Material
/*uint32_t mat_index = inMesh->mMaterialIndex;
aiMaterial* m = scene->mMaterials[mat_index];
// Opacity
ai_real alpha;
m->Get(AI_MATKEY_OPACITY, alpha);
if( alpha < 0.f ) alpha = 1.f;
mat._Alpha = alpha;
if( mat._Alpha < 1.f ) { mat._IsTransparent = 1.f; }
// Gather material textures
auto AddTexture = [&TextureList, index, m](aiTextureType type, uint32_t i=0u) {
aiString fname;
m->GetTexture(type, i, &fname);
std::string s = fname.C_Str();
printf_s(" - %u:%u\n", type, m->GetTextureCount(type));
// No texture found
if( !strcmp(fname.C_Str(), "") ) return false;
LoaderTextureList::iterator it = TextureList->find(s);
if( it == TextureList->end() ) {
printf_s("%s; %u\n", s.c_str(), type);
// Add
TextureList->insert_or_assign(s, LoaderTextureList::mapped_type({ aiTextureType(type + i * (AI_TEXTURE_TYPE_MAX + 1)), { index } }));
return true;
}
TextureList->at(s).second.push_back(index);
return true;
};
printf_s("-------------\n");
/*AddTexture(aiTextureType_DIFFUSE ); // Albedo
AddTexture(aiTextureType_SHININESS); // Roughness
AddTexture(aiTextureType_LIGHTMAP ); // Ambient Occlusion / Lightmap
AddTexture(aiTextureType_EMISSIVE ); // Emission
AddTexture(aiTextureType_UNKNOWN ); // Metall Roughness
// Normals
if( !AddTexture(aiTextureType_HEIGHT) )
AddTexture(aiTextureType_NORMALS);
AddTexture(aiTextureType_OPACITY);
/*AddTexture(aiTextureType_DISPLACEMENT);
AddTexture(aiTextureType_AMBIENT);
AddTexture(aiTextureType_SPECULAR);
AddTexture(aiTextureType_REFLECTION);
AddTexture(aiTextureType_BASE_COLOR);
AddTexture(aiTextureType_NORMAL_CAMERA);
AddTexture(aiTextureType_EMISSION_COLOR);
AddTexture(aiTextureType_METALNESS);
AddTexture(aiTextureType_DIFFUSE_ROUGHNESS);
AddTexture(aiTextureType_AMBIENT_OCCLUSION);
AddTexture(aiTextureType_DIFFUSE, 1);*/
// Create buffers
mesh.mIndexBuffer = new IndexBuffer();
mesh.mVBPosition = new VertexBuffer();
mesh.mVBTexcoord = new VertexBuffer();
mesh.mVBNormal = new VertexBuffer();
mesh.mVBTangent = new VertexBuffer();
mesh.mReferenced = false;
mesh.mVBPosition->SetSRV(mMeshSRV);
mesh.mVBTexcoord->SetSRV(mMeshSRV);
mesh.mVBTangent->SetSRV(mMeshSRV);
mesh.mVBNormal->SetSRV(mMeshSRV);
mesh.mIndexBuffer->SetSRV(mMeshSRV);
mesh.mVBPosition->CreateDefault(Position.size(), sizeof(float3), &Position[0]);
mesh.mVBTexcoord->CreateDefault(Texcoord.size(), sizeof(float2), &Texcoord[0]);
mesh.mVBTangent->CreateDefault( Tangent.size() , sizeof(float3), &Tangent [0]);
mesh.mVBNormal->CreateDefault( Normal .size(), sizeof(float3), &Normal [0]);
mesh.mIndexBuffer->CreateDefault(IndexNum, &Index[0]);
// Add material
//MatList->push_back(mat);
// Done
return mesh;
}
// Loads a mesh from the project's custom binary format:
//   header "VB P/N/UV; I_N=" + ASCII index count terminated by '|',
//   followed by raw position, normal, and texcoord data.
// Returns an empty MeshComponent when the file cannot be opened.
MeshComponent LoadMeshInternal(const char* fname) {
    std::ifstream file(fname, std::ios::binary);
    if( !file.is_open() ) {
        printf_s("[RendererDeferred::LoadMeshInternal]: Failed to open file %s.\n", fname);
        return {};
    }
    // "VB P/N/UV; I_N=" + Index Num
    // Position
    // Normal
    // UV
    struct Header {
        uint8_t V, B, Space;
        uint8_t P, G0;
        uint8_t N, G1;
        uint8_t UV, G2;
        uint8_t G3;
        uint8_t I_N_[5];
    } head{};
    file.read((char*)&head, sizeof(Header));
    // Parse the ASCII decimal index count, one digit at a time, up to the
    // '|' terminator (at most 10 digits). `e` ends up as the digit count.
    // NOTE(review): none of the file.read() calls in this function check for
    // short reads -- a truncated file yields garbage counts. TODO confirm
    // the format guarantees well-formed input.
    uint8_t x[10];
    uint32_t IndexNum = 0u, e = 0u;
    for( uint32_t i = 0; i < 10; i++ ) {
        file.read((char*)&x[i], 1);
        if( x[i] == '|' ) { break; }
        e++;
    }
    // 256
    // 2 * 10^2 | 2 - (2 - 0)
    // +5 * 10^1 | 2 - (2 - 1)
    // +6 * 10^0 | 2 - (2 - 2)
    // e=3
    for( uint32_t i = 0; i < e; i++ ) {
        IndexNum += (uint32_t)powf(10.f, (e - 1) - i) * (x[i] - '0');
    }
    // Load mesh data
    // NOTE(review): the vertex buffers hold IndexNum*3 elements and the reads
    // below consume IndexNum*sizeof(elem)*3 bytes, yet only IndexNum
    // sequential indices are generated and the commented-out reserve used
    // IndexNum*2 for Texcoord -- presumably IndexNum counts triangles, not
    // vertices. TODO confirm against the file writer.
    std::vector<float3> Position, Normal;
    std::vector<float2> Texcoord;
    std::vector<uint32_t> Index;
    Index.reserve(IndexNum);
    //Position.reserve(IndexNum * 3);
    //Normal.reserve(IndexNum * 3);
    //Texcoord.reserve(IndexNum * 2);
    Position.resize(IndexNum * 3);
    Normal.resize(IndexNum * 3);
    Texcoord.resize(IndexNum * 3);
    file.read((char*)Position.data(), IndexNum * sizeof(float3) * 3);
    file.read((char*)Normal  .data(), IndexNum * sizeof(float3) * 3);
    file.read((char*)Texcoord.data(), IndexNum * sizeof(float2) * 3);
    //std::copy(std::istream_iterator<float3>(file), std::istream_iterator<float3>(file) + IndexNum * 3, std::back_inserter(Position));
    //std::copy(std::istream_iterator<float3>(file), std::istream_iterator<float3>(file) + IndexNum * 3, std::back_inserter(Normal  ));
    //std::copy(std::istream_iterator<float2>(file), std::istream_iterator<float2>(file) + IndexNum * 2, std::back_inserter(Texcoord));
    file.close();
    // Create indices: the data is unindexed, so indices are just 0..IndexNum-1.
    for( uint32_t i = 0; i < IndexNum; i++ )
        Index.push_back(i);
    // Create mesh
    MeshComponent mesh{};
    // Create buffers (no tangents in this format; mVBTangent stays null).
    mesh.mIndexBuffer = new IndexBuffer();
    mesh.mVBPosition = new VertexBuffer();
    mesh.mVBTexcoord = new VertexBuffer();
    mesh.mVBNormal   = new VertexBuffer();
    //mesh.mVBTangent = new VertexBuffer();
    mesh.mReferenced = false;
    mesh.mVBPosition->SetSRV(mMeshSRV);
    mesh.mVBTexcoord->SetSRV(mMeshSRV);
    //mesh.mVBTangent->SetSRV(mMeshSRV);
    mesh.mVBNormal->SetSRV(mMeshSRV);
    mesh.mIndexBuffer->SetSRV(mMeshSRV);
    mesh.mVBPosition->CreateDefault(Position.size(), sizeof(float3), &Position[0]);
    mesh.mVBTexcoord->CreateDefault(Texcoord.size(), sizeof(float2), &Texcoord[0]);
    //mesh.mVBTangent->CreateDefault( Tangent.size() , sizeof(float3), &Tangent [0]);
    mesh.mVBNormal->CreateDefault(   Normal  .size(), sizeof(float3), &Normal  [0]);
    mesh.mIndexBuffer->CreateDefault(IndexNum, &Index[0]);
    return mesh;
}
public:
RendererDeferred(): RendererBase() {};
~RendererDeferred() {
printf_s("[~RendererDeferred]\n");
};
virtual void Init() override;
virtual void Resize() override;
virtual void Render() override;
virtual void FinalScreen() override;
virtual void Release() override;
virtual void ImGui() override;
virtual void ClearMainRT() override;
virtual void DebugHUD() override;
//
virtual void BindRSWireframe() const override;
virtual void BindRSNormal() const override;
//inline Texture* GetTexture(uint32_t index) const override { return s_material.texture_list[index]; };
inline Texture* GetTexture(TextureList index) const { return s_material.ti.texture_list[(uint32_t)index]; };
};
|
full_buffer_variabe_size.c | #include "correctness-checking-partitioned-impl.h"
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#define TOTAL_SIZE 4000
#define ITERATIONS 10
#define TAG 42
//buffer:
// RECV SEND LOCAL SEND RECV
// TOTAL_SIZE must be at least 4 times STENCIL_SIZE
// Debug helper: prints a pair of long values (handy when stepping through
// the buffer exchange by hand).
void debug_function(long a, long b) {
printf(" %ld,%ld\n", a, b);
}
/* Ring exchange: each rank fills a buffer in parallel with OpenMP, sends it
 * to the next rank, and receives from the previous rank (Irecv + Send +
 * Wait, so the ring cannot deadlock). */
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  /* Fix: verify the buffer-size argument exists before dereferencing
   * argv[1] -- running without arguments was undefined behavior. */
  if (argc < 2) {
    fprintf(stderr, "usage: %s <buffer_size>\n", argv[0]);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  int buffer_size = atoi(argv[1]);
  assert(buffer_size > 0);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  //int pre = (rank == 0) ? -1 : rank - 1;
  //int nxt = (rank == size - 1) ? -1 : rank + 1;
  /* Ring topology: send to the next rank, receive from the previous one. */
  int nxt = (rank + 1) % size;
  int pre = (rank - 1) % size;
  pre = pre < 0 ? size + pre : pre;  // if % is negative: "start counting backwards at size"
  //printf("Rank %i in comm with %d and %d\n", rank, pre,nxt);
  int *buffer = (int*) malloc(sizeof(int) * buffer_size);
  int *buffer_r = (int*) malloc(sizeof(int) * buffer_size);
  // buffer access
#pragma omp parallel for firstprivate(buffer) schedule(static,1000)
//#pragma omp parallel for firstprivate(buffer) schedule(dynamic,1000)
//#pragma omp parallel for
  for (int i = 0; i < buffer_size; ++i) {
    buffer[i] = i * rank;
  }
  // communication
  // no deadlock: the receive is posted before the blocking send
  MPI_Request req;
  printf("Rank %d recv from %d send to %d\n", rank, pre, nxt);
  MPI_Irecv(buffer_r,
            buffer_size, MPI_INT, pre, TAG,
            MPI_COMM_WORLD, &req);
  MPI_Send(buffer, buffer_size, MPI_INT, nxt, TAG,
           MPI_COMM_WORLD);
  MPI_Wait(&req, MPI_STATUS_IGNORE);
  free(buffer);
  free(buffer_r);
  MPI_Finalize();
  return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#include <assert.h>
#define N 10000// max number of elements
#define NDEBUG
#ifndef NDEBUG
#define DEBUG(cmd) cmd;
#else
#define DEBUG(cmd) ;
#endif
/* Reads whitespace-separated integers from `filename` into `arr` (capacity
 * N) and returns how many were read. Exits with a diagnostic on error.
 * Fix: this file defines NDEBUG above, which compiles assert() away, so the
 * original assert(fin) and assert(n < N) provided no protection at all
 * (NULL dereference / buffer overflow in release builds). The checks are
 * now explicit. Also, fscanf returns 0 -- not EOF -- on a matching failure,
 * so the old `!= EOF` loop spun forever on non-numeric input; we now loop
 * only while a value was actually converted. */
int read_from_file(char* filename, int* arr)
{
    FILE* fin = fopen(filename, "r");
    if (!fin)
    {
        fprintf(stderr, "ERROR: Cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    int value = 0;
    int n = 0;
    while (fscanf(fin, "%d", &value) == 1)
    {
        if (n >= N)  /* would overflow arr; assert() is disabled by NDEBUG */
        {
            fprintf(stderr, "ERROR: More than %d values in %s\n", N, filename);
            fclose(fin);
            exit(EXIT_FAILURE);
        }
        arr[n++] = value;
    }
    fclose(fin);
    return n;
}
/* Reads n integers from stdin into arr.
 * Fix: the scanf return value was ignored, so bad or short input silently
 * stored stale values; we now abort with a diagnostic instead. */
void read_from_keyboard(int n, int* arr)
{
    printf("Enter numbers: ");
    int value = 0;
    for (int i = 0; i < n; i++)
    {
        if (scanf("%d", &value) != 1)
        {
            fprintf(stderr, "ERROR: expected %d numbers, got %d\n", n, i);
            exit(EXIT_FAILURE);
        }
        arr[i] = value;
    }
}
// Task-parallel quicksort over arr[low..high] (inclusive) using Hoare
// partitioning. Each recursive half is spawned as an OpenMP task, so this
// must be called from inside a parallel region (see sort() below).
void sort_task(int arr[N], int n, int low, int high)
{
    // Empty or single-element range (or invalid low): nothing to do.
    if (low < 0 || low >= high)
        return;
    DEBUG(
        int thread_id = omp_get_thread_num();
        printf("[Thread %.2d] %d %d\n", thread_id, low, high);
    )
    // Hoare partition around the middle element.
    int pivot = arr[(high + low) / 2];
    int i = low - 1;
    int j = high + 1;
    while (true)
    {
        // Advance both cursors to the first out-of-place pair, then swap.
        do {} while (arr[++i] < pivot);
        do {} while (arr[--j] > pivot);
        if (i >= j)
            break;
        int tmp = arr[i];
        arr[i] = arr[j];
        arr[j] = tmp;
    }
    // With Hoare partitioning the halves are [low, j] and [j+1, high];
    // firstprivate captures the bounds at task-creation time.
#pragma omp task firstprivate(low, j)
    sort_task(arr, n, low, j);
#pragma omp task firstprivate(j, high)
    sort_task(arr, n, j + 1, high);
}
// Parallel entry point: one thread spawns the root quicksort task; the
// recursive tasks are created inside sort_task and executed by the team.
void sort(int arr[N], int n, int low, int high)
{
#pragma omp parallel
#pragma omp single
    {
#pragma omp task firstprivate(low, high)
        sort_task(arr, n, low, high);
    }
}
/* Parses the command line (-f <file> or -n <count>), reads the input,
 * sorts it in place with the task-parallel quicksort, and prints it. */
int main(int argc, char** argv)
{
    int n = 0;        // number of elements actually read
    int arr[N] = {0}; // data, sorted in place (the old unused sarr[] is gone)
    if (argc == 3) // input
    {
        if (strcmp(argv[1], "-f") == 0)
        {
            n = read_from_file(argv[2], arr);
        }
        else if (strcmp(argv[1], "-n") == 0)
        {
            n = atoi(argv[2]);
            /* Fix: assert(n < N) was compiled out by the NDEBUG above;
             * validate the length explicitly (and reject non-positive). */
            if (n <= 0 || n >= N)
            {
                fprintf(stderr, "ERROR: length must be between 1 and %d\n", N - 1);
                return -1;
            }
            read_from_keyboard(n, arr);
        }
        else
        {
            fprintf(stderr, "ERROR: Wrong input.\nExample ./run.out [-f (unknown)] [-n {length < 10000}]\n");
            /* Fix: this branch used to fall through and sort with n == 0
             * because assert(n) was disabled by NDEBUG. */
            return -1;
        }
    }
    else
    {
        fprintf(stderr, "ERROR: Wrong input.\nExample ./run.out [-f (unknown)] [-n {length < 10000}]\n");
        return -1;
    }
    if (n == 0)
    {
        fprintf(stderr, "ERROR: no input values\n");
        return -1;
    }
    sort(arr, n, 0, n - 1);
    for (int i = 0; i < n; i++)
    {
        printf("%d ", arr[i]);
    }
    printf("\n");
    return 0;
}
|
GB_emult_phase0.c | //------------------------------------------------------------------------------
// GB_emult_phase0: find vectors of C to compute for C=A.*B or C<M>=A.*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// The eWise multiply of two matrices, C=A.*B, C<M>=A.*B, or C<!M>=A.*B starts
// with this phase, which determines which vectors of C need to be computed.
// On input, A and B are the two matrices being ewise multiplied, and M is the
// optional mask matrix. If present, it is not complemented.
// The M, A, and B matrices are sparse or hypersparse (not a slice or
// hyperslice). C will be standard (if Ch is returned NULL) or hypersparse
// (if Ch is returned non-NULL).
// Ch: the vectors to compute in C. Not allocated, but equal to either
// A->h, B->h, or M->h, or NULL if C is not hypersparse.
// C_to_A: if A is hypersparse, and Ch is not A->h, then C_to_A [k] = kA
// if the kth vector j = Ch [k] is equal to Ah [kA]. If j does not appear
// in A, then C_to_A [k] = -1. Otherwise, C_to_A is returned as NULL.
// C is always hypersparse in this case.
// C_to_B: if B is hypersparse, and Ch is not B->h, then C_to_B [k] = kB
// if the kth vector j = Ch [k] is equal to Bh [kB]. If j does not appear
// in B, then C_to_B [k] = -1. Otherwise, C_to_B is returned as NULL.
// C is always hypersparse in this case.
// C_to_M: if M is hypersparse, and Ch is not M->h, then C_to_M [k] = kM
// if the kth vector j = (Ch == NULL) ? k : Ch [k] is equal to Mh [kM].
// If j does not appear in M, then C_to_M [k] = -1. Otherwise, C_to_M is
// returned as NULL. C is always hypersparse in this case.
// FUTURE:: exploit A==M, B==M, and A==B aliases
#include "GB_emult.h"
GrB_Info GB_emult_phase0 // find vectors in C for C=A.*B or C<M>=A.*B
(
int64_t *p_Cnvec, // # of vectors to compute in C
const int64_t *GB_RESTRICT *Ch_handle, // Ch is M->h, A->h, B->h, or NULL
int64_t *GB_RESTRICT *C_to_M_handle, // C_to_M: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_A_handle, // C_to_A: size Cnvec, or NULL
int64_t *GB_RESTRICT *C_to_B_handle, // C_to_B: size Cnvec, or NULL
// original input:
const GrB_Matrix M, // optional mask, may be NULL
const GrB_Matrix A,
const GrB_Matrix B,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_Cnvec != NULL) ;
ASSERT (Ch_handle != NULL) ;
ASSERT (C_to_A_handle != NULL) ;
ASSERT (C_to_B_handle != NULL) ;
ASSERT_MATRIX_OK (A, "A for emult phase0", GB0) ;
ASSERT_MATRIX_OK (B, "B for emult phase0", GB0) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for emult phase0", GB0) ;
ASSERT (A->vdim == B->vdim) ;
ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
// Clear all outputs first so that any early (error) return leaves the
// caller's handles in a well-defined NULL state.
const int64_t *GB_RESTRICT Ch = NULL ;
int64_t *GB_RESTRICT C_to_M = NULL ;
int64_t *GB_RESTRICT C_to_A = NULL ;
int64_t *GB_RESTRICT C_to_B = NULL ;
(*Ch_handle ) = NULL ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = NULL ;
}
(*C_to_A_handle) = NULL ;
(*C_to_B_handle) = NULL ;
//--------------------------------------------------------------------------
// get content of M, A, and B
//--------------------------------------------------------------------------
int64_t n = A->vdim ;
int64_t Anvec = A->nvec ;
const int64_t *GB_RESTRICT Ah = A->h ;
bool A_is_hyper = A->is_hyper ;
ASSERT (!A->is_slice) ;
int64_t Bnvec = B->nvec ;
const int64_t *GB_RESTRICT Bh = B->h ;
bool B_is_hyper = B->is_hyper ;
ASSERT (!B->is_slice) ;
int64_t Mnvec = 0 ;
const int64_t *GB_RESTRICT Mh = NULL ;
bool M_is_hyper = false ;
if (M != NULL)
{
Mnvec = M->nvec ;
Mh = M->h ;
M_is_hyper = M->is_hyper ;
ASSERT (!M->is_slice) ;
}
//--------------------------------------------------------------------------
// determine how to construct the vectors of C
//--------------------------------------------------------------------------
// C(:,j) can be nonempty only where A(:,j), B(:,j) (and M(:,j), if present)
// all have entries, so Ch is chosen as the hyperlist with the FEWEST
// vectors among the hypersparse inputs: it is the cheapest superset of the
// vectors C can have.  Ch is a shallow pointer; it is never allocated here.
if (M != NULL)
{
//----------------------------------------------------------------------
// 8 cases to consider: A, B, M can each be hyper or standard
//----------------------------------------------------------------------
// Mask is present and not complemented
if (A_is_hyper)
{
if (B_is_hyper)
{
if (M_is_hyper)
{
//----------------------------------------------------------
// (1) A hyper, B hyper, M hyper: C hyper
//----------------------------------------------------------
// Ch = smaller of Mh, Bh, Ah
int64_t nvec = GB_IMIN (Anvec, Bnvec) ;
nvec = GB_IMIN (nvec, Mnvec) ;
if (nvec == Anvec)
{
Ch = Ah ;
}
else if (nvec == Bnvec)
{
Ch = Bh ;
}
else // (nvec == Mnvec)
{
Ch = Mh ;
}
}
else
{
//----------------------------------------------------------
// (2) A hyper, B hyper, M standard: C hyper
//----------------------------------------------------------
// Ch = smaller of Ah, Bh
if (Anvec <= Bnvec)
{
Ch = Ah ;
}
else
{
Ch = Bh ;
}
}
}
else
{
if (M_is_hyper)
{
//----------------------------------------------------------
// (3) A hyper, B standard, M hyper: C hyper
//----------------------------------------------------------
// Ch = smaller of Mh, Ah
if (Anvec <= Mnvec)
{
Ch = Ah ;
}
else
{
Ch = Mh ;
}
}
else
{
//----------------------------------------------------------
// (4) A hyper, B standard, M standard: C hyper
//----------------------------------------------------------
Ch = Ah ;
}
}
}
else
{
if (B_is_hyper)
{
if (M_is_hyper)
{
//----------------------------------------------------------
// (5) A standard, B hyper, M hyper: C hyper
//----------------------------------------------------------
// Ch = smaller of Mh, Bh
if (Bnvec <= Mnvec)
{
Ch = Bh ;
}
else
{
Ch = Mh ;
}
}
else
{
//----------------------------------------------------------
// (6) A standard, B hyper, M standard: C hyper
//----------------------------------------------------------
Ch = Bh ;
}
}
else
{
if (M_is_hyper)
{
//----------------------------------------------------------
// (7) A standard, B standard, M hyper: C hyper
//----------------------------------------------------------
Ch = Mh ;
}
else
{
//----------------------------------------------------------
// (8) A standard, B standard, M standard: C standard
//----------------------------------------------------------
// Ch stays NULL: C covers all n vectors.
;
}
}
}
}
else
{
//----------------------------------------------------------------------
// 4 cases to consider: A, B can be hyper or standard
//----------------------------------------------------------------------
// Mask is not present, or present and complemented.
if (A_is_hyper)
{
if (B_is_hyper)
{
//--------------------------------------------------------------
// (1) A hyper, B hyper: C hyper
//--------------------------------------------------------------
// Ch = smaller of Ah, Bh
if (Anvec <= Bnvec)
{
Ch = Ah ;
}
else
{
Ch = Bh ;
}
}
else
{
//--------------------------------------------------------------
// (2) A hyper, B standard: C hyper
//--------------------------------------------------------------
Ch = Ah ;
}
}
else
{
if (B_is_hyper)
{
//--------------------------------------------------------------
// (3) A standard, B hyper: C hyper
//--------------------------------------------------------------
Ch = Bh ;
}
else
{
//--------------------------------------------------------------
// (4) A standard, B standard: C standard
//--------------------------------------------------------------
// Ch stays NULL: C covers all n vectors.
;
}
}
}
//--------------------------------------------------------------------------
// find Cnvec
//--------------------------------------------------------------------------
// Cnvec is the length of whichever hyperlist Ch aliases (or n if standard).
int64_t Cnvec ;
if (Ch == NULL)
{
// C is standard
Cnvec = n ;
}
else if (Ch == Ah)
{
Cnvec = Anvec ;
}
else if (Ch == Bh)
{
Cnvec = Bnvec ;
}
else // (Ch == Mh)
{
Cnvec = Mnvec ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// construct C_to_M mapping
//--------------------------------------------------------------------------
// Only needed when M is hypersparse and Ch does not already alias Mh;
// each iteration is independent, so the loop is embarrassingly parallel.
if (M_is_hyper && Ch != Mh)
{
// allocate C_to_M
GB_MALLOC_MEMORY (C_to_M, Cnvec, sizeof (int64_t)) ;
if (C_to_M == NULL)
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
// compute C_to_M
ASSERT (Ch != NULL) ;
const int64_t *GB_RESTRICT Mp = M->p ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t pM, pM_end, kM = 0 ;
int64_t j = Ch [k] ;
// search Mh for vector j; C_to_M [k] = -1 if j is not in M
GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ;
C_to_M [k] = (pM < pM_end) ? kM : -1 ;
}
}
//--------------------------------------------------------------------------
// construct C_to_A mapping
//--------------------------------------------------------------------------
if (A_is_hyper && Ch != Ah)
{
// allocate C_to_A
GB_MALLOC_MEMORY (C_to_A, Cnvec, sizeof (int64_t)) ;
if (C_to_A == NULL)
{
// out of memory: free what was allocated so far before returning
GB_FREE_MEMORY (C_to_M, Cnvec, sizeof (int64_t)) ;
return (GB_OUT_OF_MEMORY) ;
}
// compute C_to_A
ASSERT (Ch != NULL) ;
const int64_t *GB_RESTRICT Ap = A->p ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t pA, pA_end, kA = 0 ;
int64_t j = Ch [k] ;
// search Ah for vector j; C_to_A [k] = -1 if j is not in A
GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ;
C_to_A [k] = (pA < pA_end) ? kA : -1 ;
}
}
//--------------------------------------------------------------------------
// construct C_to_B mapping
//--------------------------------------------------------------------------
if (B_is_hyper && Ch != Bh)
{
// allocate C_to_B
GB_MALLOC_MEMORY (C_to_B, Cnvec, sizeof (int64_t)) ;
if (C_to_B == NULL)
{
// out of memory: free what was allocated so far before returning
GB_FREE_MEMORY (C_to_M, Cnvec, sizeof (int64_t)) ;
GB_FREE_MEMORY (C_to_A, Cnvec, sizeof (int64_t)) ;
return (GB_OUT_OF_MEMORY) ;
}
// compute C_to_B
ASSERT (Ch != NULL) ;
const int64_t *GB_RESTRICT Bp = B->p ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
int64_t pB, pB_end, kB = 0 ;
int64_t j = Ch [k] ;
// search Bh for vector j; C_to_B [k] = -1 if j is not in B
GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ;
C_to_B [k] = (pB < pB_end) ? kB : -1 ;
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
(*p_Cnvec ) = Cnvec ;
(*Ch_handle ) = Ch ;
if (C_to_M_handle != NULL)
{
(*C_to_M_handle) = C_to_M ;
}
(*C_to_A_handle) = C_to_A ;
(*C_to_B_handle) = C_to_B ;
//--------------------------------------------------------------------------
// The code below describes what the output contains:
//--------------------------------------------------------------------------
#ifdef GB_DEBUG
ASSERT (A != NULL) ; // A and B are always present
ASSERT (B != NULL) ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < Cnvec ; k++)
{
// C(:,j) is in the list, as the kth vector
int64_t j ;
if (Ch == NULL)
{
// C will be constructed as standard sparse
j = k ;
}
else
{
// C will be constructed as hypersparse
j = Ch [k] ;
}
// vectors j in Ch are sorted, and in the range 0:n-1
ASSERT (j >= 0 && j < n) ;
ASSERT (j > jlast) ;
jlast = j ;
// see if A (:,j) exists
if (C_to_A != NULL)
{
// A is hypersparse
// NOTE(review): missing ';' after this ASSERT — harmless only if
// ASSERT expands to an empty or statement-like form; confirm.
ASSERT (A->is_hyper)
int64_t kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
int64_t jA = A->h [kA] ;
ASSERT (j == jA) ;
}
}
else if (A->is_hyper)
{
// A is hypersparse, and Ch is a shallow copy of A->h
ASSERT (Ch == A->h) ;
}
// see if B (:,j) exists
if (C_to_B != NULL)
{
// B is hypersparse
// NOTE(review): missing ';' after this ASSERT — see note above on A.
ASSERT (B->is_hyper)
int64_t kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
int64_t jB = B->h [kB] ;
ASSERT (j == jB) ;
}
}
else if (B->is_hyper)
{
// B is hypersparse, and Ch is a shallow copy of B->h
ASSERT (Ch == B->h) ;
}
// see if M (:,j) exists
if (Ch != NULL && M != NULL && Ch == M->h)
{
// Ch is the same as Mh
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
ASSERT (C_to_M == NULL) ;
}
else if (C_to_M != NULL)
{
// M is present and hypersparse
ASSERT (M != NULL) ;
ASSERT (M->is_hyper) ;
int64_t kM = C_to_M [k] ;
ASSERT (kM >= -1 && kM < M->nvec) ;
if (kM >= 0)
{
int64_t jM = M->h [kM] ;
ASSERT (j == jM) ;
}
}
else
{
// M is not present, or in standard form
ASSERT (M == NULL || !(M->is_hyper)) ;
}
}
#endif
return (GrB_SUCCESS) ;
}
|
deflation_utils.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_DEFLATION_UTILS )
#define KRATOS_DEFLATION_UTILS
/* System includes */
#include "includes/define.h"
#include "includes/model_part.h"
#ifdef KRATOS_USE_AMATRIX // This macro definition is for the migration period and to be removed afterward please do not use it
#include "boost/numeric/ublas/matrix.hpp" // for the vector used here.
#else
#endif // KRATOS_USE_AMATRIX
/* External includes */
/* Project includes */
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** This class defines utility for aggregation of node clusters to be used in deflated solvers.
Detail class definition.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
/// Utility for aggregating node clusters, used by deflated linear solvers.
/// Builds a coarse ("deflated") matrix pattern from a fine matrix via MIS-style
/// aggregation (algorithm adapted from the pyamg library) and provides the
/// prolongation/restriction operators W and W^T stored as an index vector.
class DeflationUtils
{
public:
    /**@name Type Definitions */
    /*@{ */
    typedef boost::numeric::ublas::compressed_matrix<double> SparseMatrixType;
    typedef boost::numeric::ublas::vector<double> SparseVectorType;
    /*@} */

    ///visualize aggregates. This function assumes that neighbours are calculated and builds the connectivity matrix
    ///then writes it to a nodal variable so that it can be used for visualizing it.
    void VisualizeAggregates(ModelPart::NodesContainerType& rNodes, Variable<double>& rVariable, const int max_reduced_size)
    {
        SparseMatrixType A(rNodes.size(),rNodes.size());
        SparseMatrixType Adeflated;
        // First of all build the connectivity (graph) matrix of the mesh.
        std::vector< std::vector<int> > index_list(rNodes.size());
        std::size_t total_size = 0;
        // Renumber nodes consecutively so Ids map directly to matrix rows.
        int new_id = 1;
        for(ModelPart::NodesContainerType::iterator in = rNodes.begin(); in!=rNodes.end(); in++)
            in->SetId(new_id++);
        // Construct the system matrix pattern row by row.
        std::size_t index_i;
        for(ModelPart::NodesContainerType::iterator in = rNodes.begin();
                in!=rNodes.end(); in++)
        {
            index_i = (in)->Id()-1;
            WeakPointerVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
            std::vector<int>& indices = index_list[index_i];
            indices.reserve(neighb_nodes.size()+1);
            // Fill the first-neighbours list (including the diagonal entry).
            indices.push_back(index_i);
            for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin();
                    i != neighb_nodes.end(); i++)
            {
                int index_j = i->Id()-1;
                indices.push_back(index_j);
            }
            // Sort the indices and eliminate the duplicates.
            std::sort(indices.begin(),indices.end());
            std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
            indices.erase(new_end,indices.end());
            total_size += indices.size();
        }
        A.reserve(total_size,false);
        // Push back zero entries to fix the non-zero structure of A.
        for(unsigned int i=0; i<A.size1(); i++)
        {
            std::vector<int>& indices = index_list[i];
            for(unsigned int j=0; j<indices.size(); j++)
            {
                A.push_back(i,indices[j] , 0.00);
            }
        }
        // Now call the aggregation function to color the nodes.
        std::vector<int> w(rNodes.size());
        ConstructW(max_reduced_size, A, w, Adeflated);
        // Finally write the color to the nodes so that it can be visualized.
        int counter = 0;
        for(ModelPart::NodesContainerType::iterator in=rNodes.begin(); in!=rNodes.end(); in++)
        {
            in->FastGetSolutionStepValue(rVariable) = w[counter++];
        }
    }

    ///this function constructs the structure of a smaller matrix using a technique taken from MIS aggregation
    ///the algorithm is taken from the pyamg lib
    /// @param max_reduced_size maximum admissible size of the deflated matrix (reduces recursively until met)
    /// @param rA fine matrix (only its non-zero pattern is used)
    /// @param w output: aggregate index for every fine row (the deflation operator W)
    /// @param deflatedA output: coarse matrix with the aggregated non-zero pattern (values set to 0)
    static void ConstructW(const std::size_t max_reduced_size, SparseMatrixType& rA, std::vector<int>& w, SparseMatrixType& deflatedA)
    {
        KRATOS_TRY
        std::size_t full_size = rA.size1();
        w.resize(full_size,0);
        // Call the aggregation function to fill w with aggregate "colors".
        std::size_t reduced_size = standard_aggregation<int>(rA.size1(),rA.index1_data().begin(), rA.index2_data().begin(), &w[0]);
        // Non-zero structure of deflatedA
        std::vector<std::set<std::size_t> > deflatedANZ(reduced_size);
        // Loop over non-zero structure of A and build non-zero structure of deflatedA
        SparseMatrixType::iterator1 a_iterator = rA.begin1();
        for (std::size_t i = 0; i < full_size; i++)
        {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            for (SparseMatrixType::iterator2 row_iterator = a_iterator.begin() ;
                    row_iterator != a_iterator.end() ; ++row_iterator)
            {
#else
            for (typename SparseMatrixType::iterator2 row_iterator = begin(a_iterator,
                    boost::numeric::ublas::iterator1_tag());
                    row_iterator != end(a_iterator,
                                        boost::numeric::ublas::iterator1_tag()); ++row_iterator )
            {
#endif
                // Entry (i,j) of A contributes entry (w[i], w[j]) to deflatedA.
                deflatedANZ[w[a_iterator.index1()]].insert(w[row_iterator.index2()]);
            }
            a_iterator++;
        }
        // Count the number of non-zeros in deflatedA
        std::size_t NZ = 0;
        for (std::size_t i = 0; i < reduced_size; i++)
            NZ += deflatedANZ[i].size();
        deflatedA = SparseMatrixType(reduced_size, reduced_size,NZ);
        // Insert the non-zero structure into deflatedA (values are zero).
        for(std::size_t i = 0 ; i < reduced_size ; i++)
        {
            for(std::set<std::size_t>::iterator j = deflatedANZ[i].begin() ; j != deflatedANZ[i].end() ; j++)
            {
                deflatedA.push_back(i,*j, 0.00);
            }
        }
        if(reduced_size > max_reduced_size)
        {
            // Still too big: reduce further, recursively.
            SparseMatrixType Areduced;
            std::vector<int> wsmaller;
            ConstructW(max_reduced_size, deflatedA, wsmaller, Areduced);
            // Re-map w and deflatedA to the coarser level.
            for(unsigned int i=0; i<full_size; i++)
            {
                int color = w[i];
                int new_color = wsmaller[color];
                w[i] = new_color;
            }
            deflatedA.clear();
            deflatedA = Areduced;
            reduced_size = wsmaller.size();
        }
        KRATOS_CATCH("")
    }

    ///block version of ConstructW. To be used when multiple DOFS are associated to the same node.
    /// Rows are grouped in consecutive blocks of block_size DOFs; aggregation is
    /// performed on the scalar (per-node) graph and then expanded back to blocks.
    static void ConstructW(const int max_reduced_size, SparseMatrixType& rA, std::vector<int>& w, SparseMatrixType& deflatedA, const std::size_t block_size)
    {
        if(block_size == 1)
        {
            ConstructW(max_reduced_size,rA, w, deflatedA);
        }
        else
        {
            // Simple checks to verify blocks are effectively respected.
            if(rA.size1()%block_size != 0 || rA.size2()%block_size != 0)
                KRATOS_THROW_ERROR(std::logic_error,"the number of rows is not a multiple of block_size. Can not use the block deflation","")
            if(rA.nnz()%block_size != 0)
                KRATOS_THROW_ERROR(std::logic_error,"the number of non zeros is not a multiple of block_size. Can not use the block deflation","")
            // Construct Ascalar: one row/column per node instead of per DOF.
            SparseMatrixType Ascalar;
            ConstructScalarMatrix(rA.size1(),block_size,rA.index1_data().begin(), rA.index2_data().begin(), Ascalar);
            // Deflate it using the scalar method.
            SparseMatrixType deflatedAscalar;
            std::vector<int> wscalar;
            ConstructW(max_reduced_size/block_size,Ascalar, wscalar, deflatedAscalar);
            // BUG FIX: write into the output parameter 'w'. The original code
            // declared a local std::vector<int> w here, shadowing the parameter,
            // so the computed deflation vector never reached the caller.
            w.resize(wscalar.size()*block_size);
            for(std::size_t i=0; i<wscalar.size(); i++)
            {
                for(std::size_t j=0; j<block_size; j++)
                {
                    w[i*block_size + j] = wscalar[i]*block_size+j;
                }
            }
            // BUG FIX: likewise, build the output parameter 'deflatedA' (a local
            // SparseMatrixType shadowed it before, discarding the result).
            deflatedA = SparseMatrixType(deflatedAscalar.size1()*block_size,deflatedAscalar.size2()*block_size);
            deflatedA.reserve(deflatedAscalar.nnz()*block_size*block_size);
            // BUG FIX: expand the pattern of the *deflated* scalar matrix, not of
            // the original rA — the target has deflatedAscalar.size1()*block_size
            // rows, so expanding rA's pattern was inconsistent with the size above.
            ExpandScalarMatrix(deflatedAscalar.size1(),block_size,deflatedAscalar.index1_data().begin(), deflatedAscalar.index2_data().begin(), deflatedA);
        }
    }

    //W is the deflation matrix, stored as a single vector of indices
    //y is a vector of "full" size
    //x is a vector of reduced size
    //y = W*x;
    static void ApplyW(const std::vector<int>& w, const SparseVectorType& x, SparseVectorType& y)
    {
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(w.size()); i++)
        {
            y[i] = x[w[i]];
        }
    }

    //W is the deflation matrix, stored as a single vector of indices
    //y is a vector of "reduced" size
    //x is a vector of "full" size
    //y = Wtranspose*x;
    static void ApplyWtranspose(const std::vector<int>& w, const SparseVectorType& x, SparseVectorType& y)
    {
        // First set to zero the destination vector.
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(y.size()); i++)
            y[i] = 0.0;
        //Pragma atomic does not work with MSVC 19.0.23506 ( aka Visual Studio 2015 Update1 )
#if(_MSC_FULL_VER == 190023506)
        for(int i=0; i<static_cast<int>(w.size()); i++)
        {
            y[w[i]] += x[i];
        }
#else
        // Now apply W^T; atomic is needed since several i map to the same w[i].
        #pragma omp parallel for
        for(int i=0; i<static_cast<int>(w.size()); i++)
        {
            #pragma omp atomic
            y[w[i]] += x[i];
        }
#endif
    }

    //*******************************************************************************
    //*******************************************************************************
    /// Computes Ah = W^T * A * W by accumulating every fine entry (i,j) of rA
    /// into the coarse entry (w[i], w[j]).  Ah must already have the pattern
    /// produced by ConstructW.
    static void FillDeflatedMatrix( const SparseMatrixType& rA, std::vector<int>& w, SparseMatrixType& Ah)
    {
        KRATOS_TRY
        // Zero out the existing values of Ah (pattern is kept).
        double* abegin = Ah.value_data().begin();
        int size = Ah.value_data().size();
        #pragma omp parallel for
        for (int i = 0; i < size; i++)
        {
            *(abegin+i) = 0.0;
        }
        // Now building Ah
        SparseMatrixType::const_iterator1 a_iterator = rA.begin1();
        int full_size = rA.size1();
        for (int i = 0; i < full_size; i++)
        {
#ifndef BOOST_UBLAS_NO_NESTED_CLASS_RELATION
            for (SparseMatrixType::const_iterator2 row_iterator = a_iterator.begin() ;
                    row_iterator != a_iterator.end() ; ++row_iterator)
            {
#else
            for (typename SparseMatrixType::iterator2 row_iterator = begin(a_iterator,
                    boost::numeric::ublas::iterator1_tag());
                    row_iterator != end(a_iterator,
                                        boost::numeric::ublas::iterator1_tag()); ++row_iterator )
            {
#endif
                Ah(w[a_iterator.index1()], w[row_iterator.index2()]) += *row_iterator;
            }
            a_iterator++;
        }
        KRATOS_CATCH("");
    }

private:
    /*
     * Compute aggregates for a matrix A stored in CSR format
     *
     * Parameters:
     *   n_row         - number of rows in A
     *   Ap[n_row + 1] - CSR row pointer
     *   Aj[nnz]       - CSR column indices
     *    x[n_row]     - aggregate numbers for each node
     *
     * Returns:
     *  The number of aggregates (== max(x[:]) + 1 )
     *
     * Notes:
     *    It is assumed that A is structurally symmetric.
     *    A may contain diagonal entries (self loops)
     *    Unaggregated nodes are marked with a -1
     *
     */
    template <class I>
    static I standard_aggregation(const I n_row,
                                  const std::size_t Ap[],
                                  const std::size_t Aj[],
                                  I x[])
    {
        // x[n] == 0 means the i-th node has not been aggregated yet.
        std::fill(x, x + n_row, 0);
        I next_aggregate = 1; // number of aggregates + 1
        //Pass #1
        for(I i = 0; i < n_row; i++)
        {
            if(x[i])
            {
                continue; //already marked
            }
            const I row_start = Ap[i];
            const I row_end = Ap[i+1];
            //Determine whether all neighbors of this node are free (not already aggregates)
            bool has_aggregated_neighbors = false;
            bool has_neighbors = false;
            for(I jj = row_start; jj < row_end; jj++)
            {
                const I j = Aj[jj];
                if( i != j )
                {
                    has_neighbors = true;
                    if( x[j] )
                    {
                        has_aggregated_neighbors = true;
                        break;
                    }
                }
            }
            if(!has_neighbors)
            {
                //isolated node, do not aggregate
                x[i] = -n_row;
            }
            else if (!has_aggregated_neighbors)
            {
                //Make an aggregate out of this node and its neighbors
                x[i] = next_aggregate;
                for(I jj = row_start; jj < row_end; jj++)
                {
                    x[Aj[jj]] = next_aggregate;
                }
                next_aggregate++;
            }
        }
        //Pass #2
        // Add unaggregated nodes to any neighboring aggregate
        for(I i = 0; i < n_row; i++)
        {
            if(x[i])
            {
                continue; //already marked
            }
            for(I jj = static_cast<I>(Ap[i]); jj < static_cast<I>(Ap[i+1]); jj++)
            {
                const I j = Aj[jj];
                const I xj = x[j];
                if(xj > 0)
                {
                    x[i] = -xj; // negative marks "adopted in pass 2"
                    break;
                }
            }
        }
        next_aggregate--;
        //Pass #3: renumber to 0-based aggregates, resolve leftovers
        for(I i = 0; i < n_row; i++)
        {
            const I xi = x[i];
            if(xi != 0)
            {
                // node i has been aggregated
                if(xi > 0)
                    x[i] = xi - 1;
                else if(xi == -n_row)
                    x[i] = -1; // isolated node stays unaggregated
                else
                    x[i] = -xi - 1;
                continue;
            }
            // node i has not been aggregated: start a new aggregate here
            const I row_start = Ap[i];
            const I row_end = Ap[i+1];
            x[i] = next_aggregate;
            for(I jj = row_start; jj < row_end; jj++)
            {
                const I j = Aj[jj];
                if(x[j] == 0) //unmarked neighbors
                {
                    x[j] = next_aggregate;
                }
            }
            next_aggregate++;
        }
        return next_aggregate; //number of aggregates
    }

    /// Extracts the per-node (scalar) pattern from a block CSR pattern: keeps
    /// one entry per block, taking rows/columns whose index is a multiple of
    /// block_size.  Values are initialized to zero.
    static void ConstructScalarMatrix(const std::size_t n_row, const std::size_t block_size,
                                      const std::size_t Ap[],
                                      const std::size_t Aj[],
                                      SparseMatrixType& Ascalar
                                     )
    {
        Ascalar.resize(n_row/block_size,n_row/block_size,0.0);
        std::size_t scalar_size = (Ap[n_row]-Ap[0])/(block_size*block_size);
        Ascalar.reserve(scalar_size);
        for(std::size_t i = 0; i < n_row; i++)
        {
            if(i%block_size == 0)
            {
                std::size_t iscalar = i/block_size;
                const std::size_t row_start = Ap[i];
                const std::size_t row_end = Ap[i+1];
                for(std::size_t jj = row_start; jj < row_end; jj++)
                {
                    const std::size_t j = Aj[jj];
                    if(j%block_size == 0)
                    {
                        std::size_t jscalar = j/block_size;
                        Ascalar.push_back(iscalar,jscalar,0.0);
                    }
                }
            }
        }
    }

    /// Inverse of ConstructScalarMatrix: expands every scalar entry (i,j) into
    /// a dense block_size x block_size block in Aexpanded (values set to zero).
    static void ExpandScalarMatrix(const std::size_t n_row, const std::size_t block_size,
                                   const std::size_t Ap[],
                                   const std::size_t Aj[],
                                   SparseMatrixType& Aexpanded
                                  )
    {
        Aexpanded.resize(n_row*block_size,n_row*block_size,0.0);
        std::size_t expanded_size = (Ap[n_row]-Ap[0])*block_size*block_size;
        Aexpanded.reserve(expanded_size);
        for(std::size_t i = 0; i < n_row; i++)
        {
            const std::size_t row_start = Ap[i];
            const std::size_t row_end = Ap[i+1];
            for(std::size_t isub=0; isub<block_size; isub++)
            {
                std::size_t iexpanded = i*block_size + isub;
                for(std::size_t jj = row_start; jj < row_end; jj++)
                {
                    const std::size_t j = Aj[jj];
                    for(std::size_t jsub=0; jsub<block_size; jsub++)
                    {
                        std::size_t jexpanded = j*block_size+jsub;
                        Aexpanded.push_back(iexpanded,jexpanded,0.0);
                    }
                }
            }
        }
    }
}; /* Class DeflationUtils */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* DEFLATION_UTILS defined */
|
convolution_omp.c | //
// convolution.c
//
// Base code created by Josep Lluis Lerida on 11/03/15.
// Base code updated by Vitor da Silva on 17/04/2021
//
// Created by Josep Lluis Lerida on 11/03/15.
// Updated by Vitor da Silva on 17/04/2021
// OMP solution implemented by Albert Pérez & Francesc Contreras 17/05/2021
//
// This program allows you to apply the convolution to an image file with a * .ppm extension.
// The program receives the file with the source image, the file with the kernel for the convolution and the path of the output file.
// The 2D matrix of the image is represented by a 1D vector for each R, G and B channel. The convolution is applied for each channel separately.
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <omp.h>
#define MAX_THREADS 8
// Structure for storing the content of an image.
// Structure for storing the content of an image.
struct imagenppm{
    int height;     // image height in pixels
    int width;      // image width in pixels
    char *comment;  // comment line from the PPM header (heap-allocated)
    int maxcolor;   // maximum color value from the PPM header (e.g. 255)
    int P;          // PPM magic number (e.g. 3 for ASCII "P3")
    int *R;         // red channel, width*height values, row-major 1D layout
    int *G;         // green channel, same layout as R
    int *B;         // blue channel, same layout as R
};
typedef struct imagenppm* ImagenData;
// Structure for storing the contents of a kernel.
// Structure for storing the contents of a convolution kernel.
struct structkernel{
    int kernelX;   // kernel width
    int kernelY;   // kernel height
    float *vkern;  // kernelX*kernelY coefficients, row-major 1D layout
};
typedef struct structkernel* kernelData;
// Definition of the main functions: reading an image, duplicating the image, reading the kernel, convolution, saving an image in PPM format, etc.
ImagenData readImage(char* name);
kernelData readKernel(char* name);
ImagenData duplicateImageData(ImagenData src);
int convolve2D(int*, int*, int, int, float*, int, int);
int saveFile(ImagenData Img, char* name);
// This Function allows us to read a ppm file and place the information: encoding, size, RGB, etc. of the image in a structure.
// The value of each RGB pixel in the 2D image is saved and represented by 1D vectors for each channel.
// This Function allows us to read a ppm file and place the information: encoding, size, RGB, etc. of the image in a structure.
// The value of each RGB pixel in the 2D image is saved and represented by 1D vectors for each channel.
// Returns NULL if the file cannot be opened or memory cannot be allocated.
ImagenData readImage(char* name){
    FILE *fp;
    char c;
    char comment[300];
    int i=0;
    int ch;
    ImagenData Img=NULL;
    // The ppm file is opened
    fp=fopen(name,"r");
    if(!fp){
        perror("Error");
    }
    else{
        // We reserve memory for the Image structure
        Img=(ImagenData) malloc(sizeof(struct imagenppm));
        if(Img==NULL){fclose(fp);return NULL;}
        //The magic number is read and saved
        fscanf(fp,"%c%d ",&c,&(Img->P));
        // The comment is read and saved character by character.
        // BUG FIX: bound the copy (comment[300] could overflow on long lines)
        // and NUL-terminate the buffer — the original never wrote '\0', so the
        // strlen/strcpy below read past the valid data (undefined behavior).
        while((ch=fgetc(fp))!='\n' && ch!=EOF){
            if(i < (int)sizeof(comment)-1){comment[i]=(char)ch;i++;}
        }
        comment[i]='\0';
        // BUG FIX: +1 so the terminating '\0' fits (calloc(strlen(s)) was one byte short)
        Img->comment = calloc(strlen(comment)+1,sizeof(char));
        if(Img->comment==NULL){fclose(fp);return NULL;}
        strcpy(Img->comment,comment);
        // Read and save the width, height and maximum color
        fscanf(fp,"%d %d %d",&Img->width,&Img->height,&Img->maxcolor);
        // Memory is reserved in R, G and B according to width and height
        // And the values of R, G and B of the file are assigned
        if ((Img->R=calloc(Img->width*Img->height,sizeof(int))) == NULL) {return NULL;}
        if ((Img->G=calloc(Img->width*Img->height,sizeof(int))) == NULL) {return NULL;}
        if ((Img->B=calloc(Img->width*Img->height,sizeof(int))) == NULL) {return NULL;}
        for(i=0;i<Img->width*Img->height;i++){
            fscanf(fp,"%d %d %d ",&Img->R[i],&Img->G[i],&Img->B[i]);
        }
        fclose(fp);
    }
    return Img;
}
// This function allows us to read a kernel from a file, the kernel is represented by a 1D vector.
// This function allows us to read a kernel from a file, the kernel is represented by a 1D vector.
// File format: "X,Y," followed by X*Y comma-separated floats.
// Returns NULL on open failure, malformed dimensions, or allocation failure.
kernelData readKernel(char* name){
    FILE *fp;
    int i=0;
    kernelData kern=NULL;
    //Opening ppm file
    fp=fopen(name,"r");
    if(!fp){
        perror("Error: ");
    }
    else{
        //We reserve memory for the structure that the kernel will store
        kern=(kernelData) malloc(sizeof(struct structkernel));
        if(kern==NULL){fclose(fp);return NULL;}
        // We read the dimensions of the kernel.
        // BUG FIX: validate the fscanf result and the dimensions — the original
        // passed an unchecked (possibly garbage) size straight to malloc.
        if(fscanf(fp,"%d,%d,", &kern->kernelX, &kern->kernelY)!=2
           || kern->kernelX<=0 || kern->kernelY<=0){
            free(kern);
            fclose(fp);
            return NULL;
        }
        kern->vkern = (float *)malloc(kern->kernelX*kern->kernelY*sizeof(float));
        if(kern->vkern==NULL){free(kern);fclose(fp);return NULL;}
        //kernel reading: X*Y-1 values followed by a comma, then the last one
        for (i=0;i<(kern->kernelX*kern->kernelY)-1;i++){
            fscanf(fp,"%f,",&kern->vkern[i]);
        }
        fscanf(fp,"%f",&kern->vkern[i]);
        fclose(fp);
    }
    return kern;
}
// This function allows you to copy the main data from the original image data structure into a second structure.
// This function allows you to copy the main data from the original image data structure into a second structure.
// Performs a deep copy (comment string and R/G/B planes are duplicated).
// Returns NULL if any allocation fails.
ImagenData duplicateImageData(ImagenData src){
    unsigned int imageX, imageY;
    // (removed unused locals 'c', 'comment' and 'i' from the original)
    // We reserve memory for the target Image structure
    ImagenData dst=(ImagenData) malloc(sizeof(struct imagenppm));
    if (dst == NULL) {return NULL;}
    //Magic number is copied
    dst->P=src->P;
    // Comment is copied.
    // BUG FIX: +1 so the terminating '\0' fits — calloc(strlen(s)) was one
    // byte short, so strcpy wrote past the end of the allocation.
    dst->comment = calloc(strlen(src->comment)+1,sizeof(char));
    if (dst->comment == NULL) {return NULL;}
    strcpy(dst->comment,src->comment);
    // Width, height and maximum color are copied
    imageX=src->width;
    imageY=src->height;
    dst->width=imageX;
    dst->height=imageY;
    dst->maxcolor=src->maxcolor;
    // Memory is reserved in R, G and B according to width and height
    if ((dst->R=calloc(imageX*imageY,sizeof(int))) == NULL) {return NULL;}
    if ((dst->G=calloc(imageX*imageY,sizeof(int))) == NULL) {return NULL;}
    if ((dst->B=calloc(imageX*imageY,sizeof(int))) == NULL) {return NULL;}
    memcpy(dst->R, src->R, (imageX*imageY)*sizeof(int));
    memcpy(dst->G, src->G, (imageX*imageY)*sizeof(int));
    memcpy(dst->B, src->B, (imageX*imageY)*sizeof(int));
    return dst;
}
// This function stores the new Image data in a ppm file.
// This function stores the new Image data in a ppm file.
// Writes the header (magic number, comment, dimensions, max color) followed
// by all pixels as ASCII "R G B " triplets.  Returns 0 on success, -1 if the
// output file cannot be opened.
int saveFile(ImagenData Img, char* name){
    int i,j;
    FILE *fp;
    // Resulting image is created
    if (!(fp=fopen(name,"w"))) {
        printf("Error opening the result file: %s\n",name);
        return -1;
    }
    //The magic number, comment, width, height and maximum color are written to the file
    fprintf(fp,"P%d\n%s\n%d %d\n%d\n",Img->P,Img->comment,Img->width,Img->height,Img->maxcolor);
    //Pixels are written
    for(i=0;i<Img->width*Img->height;i++){
        fprintf(fp,"%d %d %d ",Img->R[i],Img->G[i],Img->B[i]);
        // NOTE(review): this emits a newline every 'height' pixels (and one at
        // i==0), not every 'width' pixels as a row break would suggest — PPM
        // readers tolerate arbitrary whitespace, but confirm the intent.
        if (i%Img->height==0) fprintf(fp,"\n");
    }
    fclose(fp);
    return 0;
}
///////////////////////////////////////////////////////////////////////////////
// 2D convolution
// 2D data are usually stored in computer memory as contiguous 1D array.
// So, we are using 1D array for 2D data.
// 2D convolution assumes the kernel is center originated, which means, if
// kernel size 3 then, k[-1], k[0], k[1]. The middle of index is always 0.
// The following programming logics are somewhat complicated because of using
// pointer indexing in order to minimize the number of multiplications.
//
// Out-of-range kernel taps are treated as zero (zero padding at the borders).
// Returns 0 on success, -1 on invalid parameters.
//
// signed integer (32bit) version:
///////////////////////////////////////////////////////////////////////////////
int convolve2D(int* in, int* out, int dataSizeX, int dataSizeY,
               float* kernel, int kernelSizeX, int kernelSizeY)
{
    int i, j, m, n;
    int kCenterX, kCenterY;
    int rowMin, rowMax;                      // to check boundary of input array
    int colMin, colMax;                      //
    // check validity of params
    if(!in || !out || !kernel) return -1;
    if(dataSizeX <= 0 || kernelSizeX <= 0) return -1;
    if(dataSizeY <= 0 || kernelSizeY <= 0) return -1;  // was unchecked in the original
    // find center position of kernel (half of kernel size)
    kCenterX = kernelSizeX / 2;
    kCenterY = kernelSizeY / 2;
    // start convolution: rows are independent, so they can run in parallel
    #pragma omp parallel for schedule(dynamic) num_threads(MAX_THREADS) private(i, j, m, n, rowMax, rowMin, colMax, colMin)
    for(i = 0; i < dataSizeY; ++i)           // number of rows
    {
        // BUG FIX: the original made the walking pointers firstprivate, so
        // with schedule(dynamic) every thread started from the row-0 pointer
        // state and read/wrote the wrong rows. Recompute them from i so each
        // row iteration is self-contained; the serial trace is unchanged.
        int *inPtr2 = &in[dataSizeX * (i + kCenterY) + kCenterX]; // shifted (kCenterX, kCenterY)
        int *inPtr  = inPtr2;
        int *outPtr = &out[dataSizeX * i];
        float *kPtr = kernel;
        float sum;                           // temp accumulation buffer
        // compute the range of convolution, the current row of kernel should be between these
        rowMax = i + kCenterY;
        rowMin = i - dataSizeY + kCenterY;
        for(j = 0; j < dataSizeX; ++j)       // number of columns
        {
            // compute the range of convolution, the current column of kernel should be between these
            colMax = j + kCenterX;
            colMin = j - dataSizeX + kCenterX;
            sum = 0;                         // set to 0 before accumulate
            // flip the kernel and traverse all the kernel values
            // multiply each kernel value with underlying input data
            for(m = 0; m < kernelSizeY; ++m) // kernel rows
            {
                // check if the index is out of bound of input array
                if(m <= rowMax && m > rowMin)
                {
                    for(n = 0; n < kernelSizeX; ++n)
                    {
                        // check the boundary of array
                        if(n <= colMax && n > colMin)
                            sum += *(inPtr - n) * *kPtr;
                        ++kPtr;              // next kernel
                    }
                }
                else
                    kPtr += kernelSizeX;     // out of bound, move to next row of kernel
                inPtr -= dataSizeX;          // move input data 1 row up
            }
            // convert to integer, rounding away from zero
            if(sum >= 0) *outPtr = (int)(sum + 0.5f);
            else *outPtr = (int)(sum - 0.5f);
            kPtr = kernel;                   // reset kernel to (0,0)
            inPtr = ++inPtr2;                // next input
            ++outPtr;                        // next output
        }
    }
    return 0;
}
// Frees an image structure together with its pixel planes and comment
// buffer (heap-allocated by readImage/duplicateImageData — confirm for
// readImage, whose body is elsewhere in this file). NULL-safe.
static void freeImageData(ImagenData img)
{
    if (img == NULL) return;
    free(img->R);
    free(img->G);
    free(img->B);
    free(img->comment);
    free(img);
}

// Entry point: reads a PPM image and a kernel file, convolves the three
// color planes in parallel, writes the result, and reports per-phase timings.
int main(int argc, char **argv)
{
    if(argc != 4)
    {
        printf("Usage: %s <image-file> <kernel-file> <result-file>\n", argv[0]);
        printf("\n\nError, missing parameters:\n");
        printf("format: image_file kernel_file result_file\n");
        printf("- image_file : source image path (*.ppm)\n");
        printf("- kernel_file: kernel path (text file with 1D kernel matrix)\n");
        printf("- result_file: result image path (*.ppm)\n\n");
        return -1;
    }
    struct timeval tim;
    gettimeofday(&tim, NULL);
    double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
    //Read the source image.
    ImagenData source=NULL, output=NULL;
    if ( (source=readImage(argv[1]))==NULL) {
        return -1;
    }
    gettimeofday(&tim, NULL);
    double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
    // Duplicate the image in a new structure that will contain the output image
    if ( (output=duplicateImageData(source)) == NULL) {
        freeImageData(source);
        return -1;
    }
    gettimeofday(&tim, NULL);
    double t3=tim.tv_sec+(tim.tv_usec/1000000.0);
    //Kernel reading
    kernelData kern=NULL;
    if ( (kern = readKernel(argv[2]))==NULL) {
        freeImageData(source);
        freeImageData(output);
        return -1;
    }
    gettimeofday(&tim, NULL);
    double t4=tim.tv_sec+(tim.tv_usec/1000000.0);
    // The three color planes are independent, so convolve them concurrently.
    #pragma omp parallel sections num_threads(MAX_THREADS) default(none) shared(source,output,kern)
    {
        #pragma omp section
        convolve2D(source->R, output->R, source->width, source->height, kern->vkern, kern->kernelX, kern->kernelY);
        #pragma omp section
        convolve2D(source->G, output->G, source->width, source->height, kern->vkern, kern->kernelX, kern->kernelY);
        #pragma omp section
        convolve2D(source->B, output->B, source->width, source->height, kern->vkern, kern->kernelX, kern->kernelY);
    }
    gettimeofday(&tim, NULL);
    double t5=tim.tv_sec+(tim.tv_usec/1000000.0);
    // Image writing
    if (saveFile(output, argv[3])!=0) {
        printf("Error saving the image\n");
        freeImageData(source);
        freeImageData(output);
        free(kern->vkern);
        free(kern);
        return -1;
    }
    gettimeofday(&tim, NULL);
    double t6=tim.tv_sec+(tim.tv_usec/1000000.0);
    printf("Image: %s\n", argv[1]);
    printf("SizeX : %d\n", source->width);
    printf("SizeY : %d\n", source->height);
    printf("%.6lf seconds elapsed for Reading image file.\n", t2-t1);
    printf("%.6lf seconds elapsed for copying image structure.\n", t3-t2);
    printf("%.6lf seconds elapsed for Reading kernel matrix.\n", t4-t3);
    printf("%.6lf seconds elapsed for make the convolution.\n", t5-t4);
    printf("%.6lf seconds elapsed for writing the resulting image.\n", t6-t5);
    printf("%.6lf seconds elapsed\n", t6-t1);
    // Release everything: the original leaked all pixel/comment/kernel
    // buffers on every path and freed nothing on success.
    freeImageData(source);
    freeImageData(output);
    free(kern->vkern);
    free(kern);
    return 0;
}
/* NOTE(review): removed trailing non-source text (dataset-page residue
   "Subsets and Splits ..."), which was not C and broke compilation. */