source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unop__abs_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_int32_int32)
// op(A') function: GB (_unop_tran__abs_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS unary operator entry-wise to an int32 array:
// Cx [p] = GB_IABS (Ax [p]) for p in 0..anz-1.  In the bitmap case
// (Ab != NULL), positions with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__abs_int32_int32)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // all anz entries are present (A not bitmap)
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;      // aij = Ax [p]
            int32_t z = aij ;           // cast (no-op: same type)
            Cx [p] = GB_IABS (z) ;      // Cx [p] = |z|
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // entry not present in the bitmap
            int32_t aij = Ax [p] ;
            int32_t z = aij ;
            Cx [p] = GB_IABS (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply ABS.
// The entire implementation is the generic template GB_unop_transpose.c,
// specialized by the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_unop_tran__abs_int32_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task workspace (used by the included template)
    const int64_t *restrict A_slice,    // how A is sliced across tasks (used by the template)
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mbf.c | //////////////////////////////////////
// Cunren Liang, NASA JPL/Caltech
// Copyright 2017
//////////////////////////////////////
#include "resamp.h"
#include <fftw3.h>
#include <omp.h>
#define SWAP4(a) (*(unsigned int *)&(a) = (((*(unsigned int *)&(a) & 0x000000ff) << 24) | ((*(unsigned int *)&(a) & 0x0000ff00) << 8) | ((*(unsigned int *)&(a) >> 8) & 0x0000ff00) | ((*(unsigned int *)&(a) >> 24) & 0x000000ff)))
// Multi-band bandpass filtering of a full-aperture SLC image to extract the
// burst-mode spectrum (azimuth filtering per range column, with deramp /
// bandpass / reramp per burst cycle).  Returns 0 on success; exits on error.
int mbf(char *inputfile, char *outputfile, int nrg, int naz, float prf, float prf_frac, float nb, float nbg, float nboff, float bsl, float *kacoeff, float *dopcoeff1, float *dopcoeff2, int byteorder, long imageoffset, long lineoffset){
  /*
  inputfile: input file
  outputfile: output file
  nrg: file width
  naz: file length
  prf: PRF
  prf_frac: fraction of PRF processed
  (represents azimuth bandwidth)
  nb: number of lines in a burst
  (float, in terms of 1/PRF)
  nbg: number of lines in a burst gap
  (float, in terms of 1/PRF)
  nboff: number of unsynchronized lines in a burst
  (float, in terms of 1/PRF, with sign, see burst_sync.py for rules of sign)
  (the image to be processed is always considered to be master)
  bsl: start line number of a burst
  (float, the line number of the first line of the full-aperture SLC is zero)
  (no need to be first burst, any one is OK)
  kacoeff[0-3]: FM rate coefficients
  (four coefficients of a third order polynomial with regard to)
  (range sample number. range sample number starts with zero)
  dopcoeff1[0-3]: Doppler centroid frequency coefficients of this image
  (four coefficients of a third order polynomial with regard to)
  (range sample number. range sample number starts with zero)
  dopcoeff2[0-3]: Doppler centroid frequency coefficients of the other image
  (four coefficients of a third order polynomial with regard to)
  (range sample number. range sample number starts with zero)
  byteorder: (0) LSB, little endian; (1) MSB, big endian of intput file
  imageoffset: offset from start of the image of input file
  lineoffset: length of each line of input file
  */
  FILE *infp;
  FILE *outfp;
  fcomplex **in; //data read in
  fcomplex *out; //data written to output file
  fcomplex *filter; //multi-band bandpass filter
  fcomplex *filter_j;
  fcomplex *deramp; //deramp signal
  fcomplex *reramp; //reramp signal
  fcomplex *data; //data to be filtered.
  //int nrg; //file width
  //int naz; //file length
  //float prf; //assume prf are the same
  //float prf_frac; // azimuth processed bandwidth = prf_frac * prf
  //float nb; //burst length in terms of pri. number of lines
  //float nbg; //burst gap length in terms of pri. number of lines
  float nbc; //burst cycle length in terms of pri. number of lines
  //float nboff; //number of unsynchronized lines in a burst with sign
  //see burst_sync.py for rules of sign.
  //the image to be processed is always considered to be master
  //and the other image is always considered to be slave
  //float bsl; //burst start line, input float
  //float kacoeff[3]; //FM rate along range (experessed by quadratic polynomial
  //as a function of range sample number)
  //float dopcoeff1[4]; //doppler centroid frequency along range (expressed by quadratic polynomial
  //as a function of range sample number). this image
  //float dopcoeff2[4]; //doppler centroid frequency along range (expressed by quadratic polynomial
  //as a function of range sample number). the other image
  //ATTENTION: MAKE RANGE NUMBER THE SAME ACCORDING RANGE OFFSET!!!
  float pri; // 1.0/prf
  float *ka;
  float *dop1;
  float *dop2;
  float *nfa; //full aperture length in terms of pri. number of lines
              //NOTE(review): computed below but never read afterwards
  float *freqs; //burst starting doppler centroid frequency
  float *freqe; //burst ending doppler centroid frequency
  float *bis; //burst imaged area start line numbers
  float *bie; //burst imaged area ending line numbers
  float *bic; //burst imaged area center line number, corresponding to the center of raw burst,
  //rather than the actual center of imaged area
  float *bica; //burst imaged area center line number, corresponding to the actual center of imaged area
  float deramp_center; //line number where center frequency is zero Hz after deramping
  float bis_min;
  float bis_max;
  float bie_min;
  float bie_max;
  int bis_out; //starting line number of the data block written out
  int bie_out; //ending line number of the data block written out
  int bis_in; //start line number of the data block read in
  int bie_in; //ending line number of the data block read in
  int bis_out2; //starting line number of the data block written out
  int bie_out2; //ending line number of the data block written out
  int bis_in2; //start line number of the data block read in
  int bie_in2; //ending line number of the data block read in
  float nb_new;
  float nbg_new;
  float nbc_new;
  float bsl_new;
  int nbc_new_int;
  int nburst_new; //number of bursts in a burst cycle
  float bfw; //bandwidth of burst in Hz
  float bfc; //center frequency of burst in Hz
  int nfft; //fft length
  int nfilter; //filter length, MUST BE ODD
  int hnfilter; //half filter length
  int edgl; //number of lines on the starting and ending edges
  float beta; //kaiser window beta
  float sc; //constant to scale the data read in to avoid large values
  //during fft and ifft
  int edgl_flag; //flag to indicate how many lines to keep on the starting and ending edges
  //0: do not remove data on the edges
  //1: remove data less than half convolution
  //2: remove all data of incomplete convolution
  int deramp_center_flag; //flag to indicate the location with zero center frequency after
  //deramping
  //0: center (raw burst center) of the burst whose ending/start line number is used
  //1: center of the burst cycle being processed
  //2: center (raw burst center) of the center burst in the burst cycle being processed
  float tmp1, tmp2, tmp3;
  int i, j, k;
  fftwf_plan p_forward;
  fftwf_plan p_backward;
  fftwf_plan p_forward_filter;
  /*****************************************************************************/
  //I just put these parametes which can be set here. These can also be set via
  //arguments before running the programs if modifying the code to accept these
  //arguments.
  beta = 1.0;
  nfilter = 257; //MUST BE ODD
  sc = 10000.0;
  edgl_flag = 0;
  deramp_center_flag = 0;
  /*****************************************************************************/
  //open files
  infp = openfile(inputfile, "rb");
  outfp = openfile(outputfile, "wb");
  printf("\n\ninput parameters:\n");
  printf("input file: %s\n", inputfile);
  printf("output file: %s\n", outputfile);
  printf("nrg: %d\n", nrg);
  printf("prf: %f\n", prf);
  printf("prf_frac: %f\n", prf_frac);
  printf("nb: %f\n", nb);
  printf("nbg: %f\n", nbg);
  printf("nboff: %f\n", nboff);
  printf("bsl: %f\n", bsl);
  printf("kacoeff: %f, %f, %f, %f\n", kacoeff[0], kacoeff[1], kacoeff[2], kacoeff[3]);
  printf("dopcoeff1: %f, %f, %f, %f\n", dopcoeff1[0], dopcoeff1[1], dopcoeff1[2], dopcoeff1[3]);
  printf("dopcoeff2: %f, %f, %f, %f\n", dopcoeff2[0], dopcoeff2[1], dopcoeff2[2], dopcoeff2[3]);
  if(byteorder == 0){
    printf("inputfile byte order: little endian\n");
  }
  else{
    printf("inputfile byte order: big endian\n");
  }
  printf("input file image offset [byte]: %ld\n", imageoffset);
  printf("input file line offset [byte]: %ld\n", lineoffset);
  if(imageoffset < 0){
    fprintf(stderr, "image offset must be >= 0\n");
    exit(1);
  }
  if(lineoffset < 0){
    fprintf(stderr, "lineoffset offset must be >= 0\n");
    exit(1);
  }
  if(nfilter % 2 != 1){
    fprintf(stderr, "filter length must be odd!\n");
    exit(1);
  }
  //naz = file_length(infp, nrg, sizeof(fcomplex));
  //fseeko(infp,0L,SEEK_END);
  //naz = (ftello(infp) - imageoffset) / (lineoffset + nrg*sizeof(fcomplex));
  //rewind(infp);
  printf("file width: %d, file length: %d\n\n", nrg, naz);
  //per-column lookup tables, all of length nrg
  ka = array1d_float(nrg);
  dop1 = array1d_float(nrg);
  dop2 = array1d_float(nrg);
  nfa = array1d_float(nrg);
  freqs = array1d_float(nrg);
  freqe = array1d_float(nrg);
  bis = array1d_float(nrg);
  bie = array1d_float(nrg);
  bic = array1d_float(nrg);
  bica = array1d_float(nrg);
  in = array2d_fcomplex(naz, nrg);
  out = array1d_fcomplex(naz);
  pri = 1.0/prf;
  nbc = nb + nbg;
  hnfilter = (nfilter - 1) / 2;
  //find burst starting line closest to first line and after first line
  for(i = -100000; i < 100000; i++){
    tmp1 = bsl + (nb + nbg) * i;
    if(tmp1 >= 0){
      bsl = tmp1;
      break;
    }
  }
  //calculate something
  for(i = 0; i < nrg; i++){
    //azimuth FM rate. we follow the convention ka > 0
    ka[i] = kacoeff[3] * i * i * i + kacoeff[2] * i * i + kacoeff[1] * i + kacoeff[0];
    ka[i] = -ka[i];
    //doppler centroid frequency
    dop1[i] = dopcoeff1[0] + dopcoeff1[1] * i + dopcoeff1[2] * i * i + dopcoeff1[3] * i * i * i;
    //dop1[i] *= prf;
    dop2[i] = dopcoeff2[0] + dopcoeff2[1] * i + dopcoeff2[2] * i * i + dopcoeff2[3] * i * i * i;
    //dop2[i] *= prf;
    //full aperture length
    nfa[i] = prf * prf_frac / ka[i] / pri;
    //consider burst synchronization
    //these are the same for all columns
    //(loop-invariant checks/assignments below; re-evaluated each column)
    if(fabs(nboff) >= 0.8 * nb){
      fprintf(stderr, "burst synchronization is too small!\n\n");
      exit(1);
    }
    if(nboff < 0){
      bsl_new = bsl - nboff;
    }
    else{
      bsl_new = bsl;
    }
    nb_new = nb - fabs(nboff);
    nbg_new = nbg + fabs(nboff);
    nbc_new = nbc;
    nbc_new_int = (int)(nbc_new + 0.5);
    //starting and ending doppler centroid frequency of the burst
    //if the overall doppler centroid frequency = 0
    freqs[i] = -(prf * prf_frac - nb_new * pri * ka[i]) / 2.0;
    freqe[i] = (prf * prf_frac - nb_new * pri * ka[i]) / 2.0;
    //consider doppler centroid frequency
    freqs[i] += dop1[i];
    freqe[i] += dop1[i];
    //consider doppler centroid frequency of the other image
    tmp1 = dop2[i] - dop1[i];
    if(tmp1 > 0){
      freqs[i] += tmp1;
    }
    else{
      freqe[i] += tmp1;
    }
    //check if doppler centroid frequency difference too big
    if(freqe[i] - freqs[i] < nbc_new * pri * ka[i]){
      fprintf(stderr, "Doppler centroid frequency difference too large!\n\n");
      exit(1);
    }
    //starting and ending index of imaged area by the burst
    bic[i] = bsl_new + (nb_new - 1.0) / 2.0; //this should be the same for all columns
    bis[i] = freqs[i] / ka[i] / pri + bic[i];
    bie[i] = freqe[i] / ka[i] / pri + bic[i];
    bica[i] = (bis[i] + bie[i]) / 2.0;
  }
  //find the max and min of starting and ending index
  bis_min = bis[0];
  bis_max = bis[0];
  bie_min = bie[0];
  bie_max = bie[0];
  for(i = 0; i < nrg; i++){
    if(bis[i] < bis_min){
      bis_min = bis[i];
    }
    if(bis[i] > bis_max){
      bis_max = bis[i];
    }
    if(bie[i] < bie_min){
      bie_min = bie[i];
    }
    if(bie[i] > bie_max){
      bie_max = bie[i];
    }
  }
  ///////////////////////////////////////////////////////////////////////////////////////
  //This section is for reading data
  printf("reading data...\n");
  //skip image header
  //NOTE(review): fseek takes a long offset; files > 2 GB need fseeko — confirm
  fseek(infp, imageoffset, SEEK_SET);
  for(i = 0; i < naz; i++){
    //assumes lineoffset >= nrg*sizeof(fcomplex) so the seek is non-negative — TODO confirm
    if(i!=0)
      fseek(infp, lineoffset-(size_t)nrg*sizeof(fcomplex), SEEK_CUR);
    readdata((fcomplex *)in[i], (size_t)nrg * sizeof(fcomplex), infp);
  }
  //read image data
  //if(lineoffset == 0){
  //  readdata((fcomplex *)in[0], (size_t)naz * (size_t)nrg * sizeof(fcomplex), infp);
  //}
  //else{
  //  for(i = 0; i < naz; i++){
  //    fseek(infp, lineoffset, SEEK_CUR);
  //    readdata((fcomplex *)in[i], (size_t)nrg * sizeof(fcomplex), infp);
  //  }
  //}
  //swap bytes
  if(byteorder!=0){
    printf("swapping bytes...\n");
    for(i = 0; i < naz; i++)
      for(j = 0; j < nrg; j++){
        SWAP4(in[i][j].re);
        SWAP4(in[i][j].im);
      }
  }
  int debug=0;
  if(debug){
    //NOTE(review): assumes naz > 100 and nrg > 100 when enabled
    printf("%f, %f\n", in[0][0].re, in[0][0].im);
    printf("%f, %f\n", in[100][100].re, in[100][100].im);
    printf("%f, %f\n", in[naz-1][nrg-1].re, in[naz-1][nrg-1].im);
  }
  ///////////////////////////////////////////////////////////////////////////////////////
  //initialize output data
  //for(j = 0; j < naz; j++){
  //  for(k = 0; k < nrg; k++){
  //    out[j][k].re = 0.0;
  //    out[j][k].im = 0.0;
  //  }
  //}
  printf("filtering image...\n");
  //process one range column at a time
  for(i = 0; i < nrg; i++){
    if((i + 1) % 100 == 0 || (i+1) == nrg)
      fprintf(stderr,"processing: %6d of %6d\r", i+1, nrg);
    if((i+1) == nrg)
      fprintf(stderr,"\n");
    //initialize output data
    memset((void *)out, 0, (size_t)naz*sizeof(fcomplex));
    //initialize start and ending line number
    if(dop1[i] > dop2[i]){
      bis_out = roundfi(bie[i]) + 1;
      //bie_out = roundfi(bie[i]) + 1 + (nbc_new - 1);
      //changed to use nbc_new_int. 27-JAN-2015
      bie_out = roundfi(bie[i]) + 1 + (nbc_new_int - 1);
    }
    else{
      bis_out = roundfi(bis[i]);
      //bie_out = roundfi(bis[i]) + (nbc_new - 1);
      //changed to use nbc_new_int. 27-JAN-2015
      bie_out = roundfi(bis[i]) + (nbc_new_int - 1);
    }
    //consider the filter length
    bis_in = bis_out - (nfilter - 1) / 2;
    bie_in = bie_out + (nfilter - 1) / 2;
    //to make circular convolution equivalent to linear convolution
    nfft = next_pow2(bie_in - bis_in + 1 + nfilter - 1);
    //initialize filter
    filter = array1d_fcomplex(nfft);
    filter_j = array1d_fcomplex(nfft);
    //create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays.
    p_forward_filter = fftwf_plan_dft_1d(nfft, (fftwf_complex*)filter, (fftwf_complex*)filter, FFTW_FORWARD, FFTW_ESTIMATE);
    //for(j = 0; j < nfft; j++){
    //  filter[j].re = 0.0;
    //  filter[j].im = 0.0;
    //}
    //initialize output data
    memset((void *)filter, 0, (size_t)nfft*sizeof(fcomplex));
    nburst_new = (int)ceil( fabs(freqe[i]-freqs[i]) / (nbc_new * pri * ka[i]) );
    //choose deramp center
    if(dop1[i] > dop2[i]){
      if(deramp_center_flag == 0){
        deramp_center = bic[i];
      }
      else if(deramp_center_flag == 1){
        deramp_center = (bica[i] + nbc_new);
      }
      else{
        deramp_center = bic[i] + (int)((nburst_new+1) / 2) * nbc_new;
      }
    }
    else{
      if(deramp_center_flag == 0){
        deramp_center = bic[i];
      }
      else if(deramp_center_flag == 1){
        deramp_center = bica[i];
      }
      else{
        deramp_center = bic[i] + (int)(nburst_new / 2) * nbc_new;
      }
    }
    //create filters: sum one bandpass filter per burst in the cycle
    for(j = 0; j <= nburst_new; j++){
      //center frequency of bandpass filter
      //determined by distance of raw burst center and deramp center
      if(dop1[i] > dop2[i]){
        bfc = (deramp_center - (bic[i] + j*nbc_new)) * pri * ka[i];
        //do not include first burst in this case
        if(j == 0){
          continue;
        }
      }
      else{
        bfc = (deramp_center - (bic[i] - j*nbc_new)) * pri * ka[i];
        //do not include last burst in this case
        if(j == nburst_new){
          break;
        }
      }
      //bandwidth of bandpass filter
      bfw = nb_new * pri * ka[i];
      //create filter: first sample corresponding to first fully convolution sample
      bandpass_filter(bfw/prf, bfc/prf, nfilter, nfft, nfilter-1, beta, filter_j);
      //add the filters to form the filter to be used
      for(k = 0; k < nfft; k++){
        filter[k].re += filter_j[k].re;
        filter[k].im += filter_j[k].im;
      }
    }
    //forward fft
    //four1((float *)filter - 1, nfft, -1);
    fftwf_execute(p_forward_filter);
    //create deramp signal: this applies no matter whether dop1[i] is larger,
    //and no matter bic is on the left or right.
    deramp = array1d_fcomplex(nfft);
    for(j = 0; j < nfft; j++){
      //distance between fft center and deramp center
      //tmp1 = bis_in + (nfft - 1.0) / 2.0 - bic[i];
      tmp1 = bis_in + (nfft - 1.0) / 2.0 - deramp_center;
      //if(tmp1 <= 0){
      //  fprintf(stderr, "WARNING: very large doppler centroid frequnecy\n\n");
      //}
      //index used in deramp signal
      tmp2 = j - (nfft - 1.0) / 2.0 + tmp1;
      //deramp signal
      tmp3 = - PI * ka[i] * (tmp2 * pri) * (tmp2 * pri);
      deramp[j].re = cos(tmp3);
      deramp[j].im = sin(tmp3);
    }
    //rereamp signal (complex conjugate of the deramp signal)
    reramp = array1d_fcomplex(nfft);
    for(j = 0; j < nfft; j++){
      reramp[j].re = deramp[j].re;
      reramp[j].im = -deramp[j].im;
    }
    //circ_shift(reramp, nfft, -abs(nfilter-1));
    //compensate the filter group delay of (nfilter-1)/2 samples
    circ_shift(reramp, nfft, -abs( (nfilter-1)/2 ));
    /**********************************************/
    /*              do the filtering              */
    /**********************************************/
    //filter the data
    data = array1d_fcomplex(nfft);
    //create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays.
    p_forward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)data, (fftwf_complex*)data, FFTW_FORWARD, FFTW_ESTIMATE);
    p_backward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)data, (fftwf_complex*)data, FFTW_BACKWARD, FFTW_ESTIMATE);
    //step through the image one burst cycle at a time
    for(j = -10000; j < 10000; j++){
      //bis_out2 = bis_out + j * nbc_new;
      //bie_out2 = bie_out + j * nbc_new;
      //bis_in2 = bis_in + j * nbc_new;
      //bie_in2 = bie_in + j * nbc_new;
      //changed to use nbc_new_int. 27-JAN-2015
      bis_out2 = bis_out + j * nbc_new_int;
      bie_out2 = bie_out + j * nbc_new_int;
      bis_in2 = bis_in + j * nbc_new_int;
      bie_in2 = bie_in + j * nbc_new_int;
      //find data to be filtered
      if(bie_in2 <= -1){
        continue;
      }
      else if(bis_in2 >= naz){
        break;
      }
      else{
        //first zero the data
        //for(k = 0; k < nfft; k++){
        //  data[k].re = 0.0;
        //  data[k].im = 0.0;
        //}
        memset((void *)data, 0, (size_t)nfft*sizeof(fcomplex));
        //get data
        for(k = bis_in2; k <= bie_in2; k++){
          if(k <= -1 || k >= naz){
            //outside the image: leave as zero (already cleared by memset)
            data[k-bis_in2].re = 0.0;
            data[k-bis_in2].im = 0.0;
          }
          else{
            data[k-bis_in2].re = in[k][i].re / sc;
            data[k-bis_in2].im = in[k][i].im / sc;
          }
        }
      }
      //deramp the data
      #pragma omp parallel for private(k) shared(nfft, data, deramp)
      for(k = 0; k < nfft; k++){
        data[k] = cmul(data[k], deramp[k]);
      }
      //forward fft
      //four1((float *)data - 1, nfft, -1);
      fftwf_execute(p_forward);
      //multiplication in the frequency domain
      #pragma omp parallel for private(k) shared(nfft, data, filter)
      for(k = 0; k < nfft; k++)
        data[k] = cmul(data[k], filter[k]);
      //backward fft
      //four1((float *)data - 1, nfft, 1);
      fftwf_execute(p_backward);
      //reramp
      #pragma omp parallel for private(k) shared(nfft, data, reramp)
      for(k = 0; k < nfft; k++){
        data[k] = cmul(data[k], reramp[k]);
      }
      //get the filtered data
      for(k = bis_out2; k <= bie_out2; k++){
        if(edgl_flag == 0){ //do not remove data on the edges
          edgl = 0;
        }
        else if(edgl_flag == 1){ //remove data less than half convolution
          edgl = (nfft - 1) / 2;
        }
        else{ //remove data of incomplete convolution
          edgl = nfft - 1;
        }
        if((k >= (0+edgl)) && (k <= naz-1-edgl)){
          //scale back and normalize the (unnormalized) FFTW inverse transform
          out[k].re = data[k-bis_out2].re * sc / nfft;
          out[k].im = data[k-bis_out2].im * sc / nfft;
        }
      }
    }//j: block of data of each column
    //release per-column FFT plans and buffers
    fftwf_destroy_plan(p_forward);
    fftwf_destroy_plan(p_backward);
    fftwf_destroy_plan(p_forward_filter);
    free_array1d_fcomplex(filter);
    free_array1d_fcomplex(filter_j);
    free_array1d_fcomplex(deramp);
    free_array1d_fcomplex(reramp);
    free_array1d_fcomplex(data);
    //overwrite original data
    for(j = 0; j < naz; j++){
      in[j][i].re = out[j].re;
      in[j][i].im = out[j].im;
    }
  }//i: each column
  printf("writing filtering result...\n");
  writedata((fcomplex *)in[0], (size_t)naz * (size_t)nrg * sizeof(fcomplex), outfp);
  //free arrays
  free_array1d_float(ka);
  free_array1d_float(dop1);
  free_array1d_float(dop2);
  free_array1d_float(nfa);
  free_array1d_float(freqs);
  free_array1d_float(freqe);
  free_array1d_float(bis);
  free_array1d_float(bie);
  free_array1d_float(bic);
  free_array1d_float(bica);
  free_array2d_fcomplex(in);
  free_array1d_fcomplex(out);
  //close files
  fclose(infp);
  fclose(outfp);
  return 0;
}//end main()
|
day05_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>
#include <omp.h>
#define NUM_SEATS 1024 // 128 * 8
int find_element(char* pass, int len);
/*
 * Advent of Code day 5: decode boarding passes, find the largest seat ID
 * and the one missing seat whose neighbours are both occupied.
 * Reads whitespace-separated 7+3 character passes from stdin.
 */
int main(int argc, char* argv[]) {
    char rowstrings[NUM_SEATS][8], colstrings[NUM_SEATS][4];
    int row, col, id, largest_id = 0;
    bool seats[NUM_SEATS] = {false};
    int i = 0, num_tickets = 0, my_id = -1;  /* -1 => "no seat found" sentinel */
    /* Need to read in stream first before allocating to threads.
     * Bound the loop so malformed input cannot overflow the arrays. */
    while (i < NUM_SEATS && scanf("%7s%3s\n", rowstrings[i], colstrings[i]) > 0) {
        i++;
    }
    num_tickets = i; // Will be less than the number of seats
    # pragma omp parallel \
    shared(num_tickets, seats, rowstrings, colstrings, my_id) \
    private(i, row, col, id) default(none) \
    reduction(max:largest_id)
    {
        if (omp_get_thread_num() == 0) printf("Running on %d threads\n", omp_get_num_threads());
        #pragma omp for schedule(static)
        for (i=0; i<num_tickets; i++) {
            row = find_element(rowstrings[i], 7);
            col = find_element(colstrings[i], 3);
            id = row * 8 + col;
            seats[id] = true;
            if (id > largest_id) largest_id = id;
        }
        /* Implicit barrier at the end of the omp for above makes all
         * seats[] writes visible before the scan below. */
        #pragma omp for schedule(static)
        /* BUG FIX: loop previously ran to i < NUM_SEATS and read
         * seats[i+1] out of bounds at i == NUM_SEATS-1. */
        for (i=1; i<NUM_SEATS-1; i++) {
            /* Only one gap seat exists by problem construction, so at most
             * one thread writes my_id (benign with respect to the result). */
            if (seats[i] == false && seats[i-1] && seats[i+1]) my_id = i;
        }
    }
    printf("The largest Ticket ID is: %d\n", largest_id);
    printf("My Ticket ID is: %d\n", my_id);
    return 0;
}
/*
 * Decode a boarding-pass partition string via binary search.
 * pass: string of 'F'/'B' (rows) or 'L'/'R' (columns); F/L keep the lower
 *       half, B/R keep the upper half.
 * len:  number of characters to consume (7 for rows, 3 for columns).
 * Returns the selected index in [0, 2^len - 1].
 */
int find_element(char* pass, int len) {
    int lo = 0, hi, i, mid;
    /* 2^len - 1 via an integer shift: the original used pow(), which
     * returns a double, costs a libm call, and risks truncation when the
     * result is cast back to int. */
    hi = (1 << len) - 1;
    for (i=0; i<len; i++) {
        mid = (hi + lo) / 2;
        if (pass[i] == 'F' || pass[i] == 'L') hi = mid - 1;       /* keep lower half */
        else if (pass[i] == 'B' || pass[i] == 'R') lo = mid + 1;  /* keep upper half */
    }
    return lo;
}
|
estimator.h | #ifndef RANSAC_ESTIMATOR_H
#define RANSAC_ESTIMATOR_H
#include <glog/logging.h>
// #ifdef THEIA_USE_OPENMP
#include <omp.h>
// #endif
#include <vector>
namespace GraphSfM {
// Templated class for estimating a model for RANSAC. This class is purely a
// virtual class and should be implemented for the specific task that RANSAC is
// being used for. Two methods must be implemented: EstimateModel and Error. All
// other methods are optional, but will likely enhance the quality of the RANSAC
// output.
//
// NOTE: RANSAC, ARRSAC, and other RANSAC work best if Datum and Model are
// lightweight classes or structs.
// Templated class for estimating a model for RANSAC. This class is purely a
// virtual class and should be implemented for the specific task that RANSAC is
// being used for. Two methods must be implemented: EstimateModel and Error. All
// other methods are optional, but will likely enhance the quality of the RANSAC
// output.
//
// NOTE: RANSAC, ARRSAC, and other RANSAC work best if Datum and Model are
// lightweight classes or structs.
template <typename DatumType, typename ModelType> class Estimator {
public:
    typedef DatumType Datum;
    typedef ModelType Model;
    Estimator() {}
    virtual ~Estimator() {}
    // Get the minimum number of samples needed to generate a model.
    virtual double SampleSize() const = 0;
    // Given a set of data points, estimate the model. Users should implement this
    // function appropriately for the task being solved. Returns true for
    // successful model estimation (and outputs model), false for failed
    // estimation. Typically, this is a minimal set, but it is not required to be.
    virtual bool EstimateModel(const std::vector<Datum>& data,
                               std::vector<Model>* model) const = 0;
    // Estimate a model from a non-minimal sampling of the data. E.g. for a line,
    // use SVD on a set of points instead of constructing a line from two points.
    // By default, this simply implements the minimal case.
    virtual bool EstimateModelNonminimal(const std::vector<Datum>& data,
                                         std::vector<Model>* model) const {
        return EstimateModel(data, model);
    }
    // Refine the model based on an updated subset of data, and a pre-computed
    // model. Can be optionally implemented.
    virtual bool RefineModel(const std::vector<Datum>& data, Model* model) const {
        return true;
    }
    // Given a model and a data point, calculate the error. Users should implement
    // this function appropriately for the task being solved.
    virtual double Error(const Datum& data, const Model& model) const = 0;
    // Compute the residuals of many data points. By default this is just a loop
    // that calls Error() on each data point, but this function can be useful if
    // the errors of multiple points may be estimated simultanesously (e.g.,
    // matrix multiplication to compute the reprojection error of many points at
    // once).
    virtual std::vector<double> Residuals(const std::vector<Datum>& data,
                                          const Model& model) const {
        std::vector<double> residuals(data.size());
        // Use a signed bound: the original compared int i against the
        // unsigned data.size() (sign-compare warning, and UB for sizes
        // beyond INT_MAX). OpenMP 2.x still requires a signed loop index.
        const int num_data = static_cast<int>(data.size());
#pragma omp parallel for
        for (int i = 0; i < num_data; i++) {
            residuals[i] = Error(data[i], model);
        }
        return residuals;
    }
    // Returns the set inliers of the data set based on the error threshold
    // provided.
    std::vector<int> GetInliers(const std::vector<Datum>& data,
                                const Model& model,
                                double error_threshold) const {
        std::vector<int> inliers;
        inliers.reserve(data.size());
        const int num_data = static_cast<int>(data.size());  // avoid signed/unsigned compare
        for (int i = 0; i < num_data; i++) {
            if (Error(data[i], model) < error_threshold) {
                inliers.push_back(i);
            }
        }
        return inliers;
    }
    // Enable a quick check to see if the model is valid. This can be a geometric
    // check or some other verification of the model structure.
    virtual bool ValidModel(const Model& model) const { return true; }
};
} // namespace GraphSfM
#endif // RANSAC_ESTIMATOR_H
|
axpy_ompacc_parseonly.c | // Experimental test input for Accelerator directives
// simplest scalar*vector operations
// Testing extensions for multiple devices
// This one has some exaggerated cases for testing parsing only
// Liao 2/2/2015
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <omp.h>
#if 0
double time_stamp()
{
struct timeval t;
double time;
gettimeofday(&t, NULL);
time = t.tv_sec + 1.0e-6*t.tv_usec;
return time;
}
#endif
/* in second */
#define read_timer() omp_get_wtime()
//#define read_timer() time_stamp()
/* change this to do saxpy or daxpy : single precision or double precision*/
#define REAL double
#define VEC_LEN 1024000 //use a fixed number for now
/* zero out the entire vector */
/* Set every element of the n-element vector A to zero. */
void zero(REAL *A, int n)
{
    REAL *p = A;
    REAL *end = A + n;
    while (p < end) {
        *p++ = 0.0;
    }
}
/* initialize a vector with random floating point numbers */
/* Fill the n-element vector A with pseudo-random values from drand48(). */
void init(REAL *A, int n)
{
    REAL *p;
    for (p = A; p != A + n; ++p) {
        *p = (double)drand48();
    }
}
/* Return the accumulated element-wise difference sum over A[i] - B[i].
 * A value near zero indicates the two n-element vectors agree. */
REAL check(REAL*A, REAL*B, int n)
{
    REAL total = 0.0;
    int k = 0;
    while (k < n) {
        total += A[k] - B[k];
        ++k;
    }
    return total;
}
// reference CPU version
void axpy_omp(REAL* x, REAL* y, long n, REAL a) {
int i;
#pragma omp parallel for shared(x, y, n, a) private(i)
for (i = 0; i < n; ++i)
{
y[i] += a * x[i];
}
}
// GPU version
void axpy_ompacc(REAL* x, REAL* y, int n, REAL a) {
int i;
//For testing parsing only, 3 policies are used for even 1-D arrays.
#pragma omp target device (gpu0) map(tofrom: y[0:n] dist_data(block, duplicate, cyclic(5)) ) map(to: x dist_data(block(5), cyclic(3)),a,n)
#pragma omp parallel for shared(x, y, n, a) private(i)
for (i = 0; i < n; ++i)
y[i] += a * x[i];
}
/*
 * Driver: run the CPU (axpy_omp) and accelerator (axpy_ompacc) versions on
 * identical input and print the checksum of their difference plus timings.
 */
int main(int argc, char *argv[])
{
    int n;
    REAL *y_omp, *y_ompacc, *x;
    REAL a = 123.456;
    n = VEC_LEN;
    y_omp = (REAL *) malloc(n * sizeof(REAL));
    y_ompacc = (REAL *) malloc(n * sizeof(REAL));
    x = (REAL *) malloc(n * sizeof(REAL));
    if (x == NULL || y_omp == NULL || y_ompacc == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    srand48(1<<12);
    init(x, n);
    /* BUG FIX: the original called init(y_ompacc, n) and then overwrote
     * y_ompacc with memcpy from the *uninitialized* y_omp, so both vectors
     * held indeterminate data (undefined behavior). Initialize y_omp and
     * copy it so both versions start from identical, well-defined values. */
    init(y_omp, n);
    memcpy(y_ompacc, y_omp, n*sizeof(REAL));
    int num_threads;
    #pragma omp parallel shared (num_threads)
    {
        if (omp_get_thread_num() == 0)
            num_threads = omp_get_num_threads();
    }
    /* CPU threading version*/
    double omp_time = read_timer();
    axpy_omp(x, y_omp, n, a);
    omp_time = read_timer() - omp_time;
    /* openmp acc version */
    double ompacc_time = read_timer();
    axpy_ompacc(x, y_ompacc, n, a);
    ompacc_time = read_timer() - ompacc_time;
    /* checksum should be ~0 when both versions computed the same result */
    printf("axpy(%d): checksum: %g; time(s):\tOMP(%d threads)\tOMPACC\n", n, check(y_omp, y_ompacc, n),num_threads);
    printf("\t\t\t\t\t\t%4f\t%4f\n", omp_time, ompacc_time);
    free(y_omp);
    free(y_ompacc);
    free(x);
    return 0;
}
|
pt.c | /*
* Copyright (c) 2016-2018 Ilya Kaliman
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <err.h>
#ifdef LIBPT_USE_MPI
#include <mpi.h>
#endif
#include "pt.h"
extern void *(*libpt_malloc)(size_t);
extern void (*libpt_free)(void *);
void dgemm_(char *, char *, int *, int *, int *, double *, double *,
int *, double *, int *, double *, double *, int *);
/* Thin wrapper over the Fortran BLAS dgemm_ routine:
 * c = alpha * op(a) * op(b) + beta * c, column-major.
 * Converts by-value scalars to the by-reference form dgemm_ expects.
 * The const casts are safe: per the BLAS specification dgemm does not
 * modify its A and B inputs.
 * NOTE(review): callers pass size_t products for the int dimensions;
 * values must fit in int. */
static void
gemm(char transa, char transb, int m, int n, int k, double alpha,
    const double *a, int lda, const double *b, int ldb, double beta,
    double *c, int ldc)
{
    dgemm_(&transa, &transb, &m, &n, &k, &alpha, (double *)a, &lda,
        (double *)b, &ldb, &beta, c, &ldc);
}
/* Same-spin contraction with half-packed i_ovvv:
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)). */
static void
t2_i_ovvv_half(size_t o, size_t v, size_t i, size_t j, size_t k,
    double *abc, const double *t2, const double *i_ovvv)
{
    /* t2 block (i,j) is a v*v matrix */
    const double *t2_p = &t2[i*o*v*v+j*v*v];
    /* i_ovvv is half-packed: v*(v-1)/2 (b,c) pairs per (k,d) slice */
    const double *i_ovvv_p = &i_ovvv[k*v*v*(v-1)/2];
    /* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
    gemm('T', 'T', v, v*(v-1)/2, v, 1.0, t2_p, v,
        i_ovvv_p, v*(v-1)/2, 0.0, abc, v);
}
/* Mixed-spin variant (baba t2 with half-packed aaaa i_ovvv) of:
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)). */
static void
t2_baba_i_ovvv_aaaa_half(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, double *abc, const double *t2,
    const double *i_ovvv)
{
    const double *t2_p = &t2[i*oa*vb*va+j*vb*va];
    /* aaaa i_ovvv is half-packed: va*(va-1)/2 (b,c) pairs per (k,d) */
    const double *i_ovvv_p = &i_ovvv[k*va*va*(va-1)/2];
    (void)ob; /* unused */
    /* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
    gemm('T', 'T', vb, va*(va-1)/2, va, 1.0, t2_p, va,
        i_ovvv_p, va*(va-1)/2, 0.0, abc, vb);
}
/* Mixed-spin variant (aaaa t2 with full baba i_ovvv) of:
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)). */
static void
t2_aaaa_i_ovvv_baba(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, double *abc, const double *t2,
    const double *i_ovvv)
{
    const double *t2_p = &t2[i*oa*va*va+j*va*va];
    /* baba i_ovvv is stored full: va*vb (b,c) pairs per (k,d) slice */
    const double *i_ovvv_p = &i_ovvv[k*va*vb*va];
    (void)ob; /* unused */
    /* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
    gemm('T', 'T', va, va*vb, va, 1.0, t2_p, va,
        i_ovvv_p, va*vb, 0.0, abc, va);
}
/* Mixed-spin variant (abab t2 with abab i_ovvv) of:
 * out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)). */
static void
t2_abab_i_ovvv_abab(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, double *abc, const double *t2,
    const double *i_ovvv)
{
    const double *t2_p = &t2[i*ob*va*vb+j*va*vb];
    const double *i_ovvv_p = &i_ovvv[k*vb*va*vb];
    (void)oa; /* unused */
    /* out(i,j,k,a,b,c) = contract(d, t2(i,j,a,d), i_ovvv(k,d,b,c)) */
    gemm('T', 'T', va, va*vb, vb, 1.0, t2_p, vb,
        i_ovvv_p, va*vb, 0.0, abc, va);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l))
 *
 * Same-spin case: the whole t2(i,*) slab (o rows of v*v) is multiplied
 * against the (j,k) block of i_oovo; result lands in abc with leading
 * dimension v*v.
 */
static void
t2_i_oovo(size_t o, size_t v, size_t i, size_t j, size_t k,
    double *abc, const double *t2, const double *i_oovo)
{
	const size_t vv = v*v;
	const double *t2_block = t2 + i*o*vv;
	const double *oovo_block = i_oovo + (j*o + k)*o*v;

	gemm('N', 'N', vv, v, o, 1.0, t2_block, vv,
	    oovo_block, o, 0.0, abc, vv);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2_aaaa(i,l,a,b), i_oovo_baba(j,k,c,l)).
 * t2_aaaa i-slab stride oa*va*va; baba i_oovo strides: j -> oa*vb*oa,
 * k -> vb*oa.  The contraction runs over the alpha occupied index (oa);
 * ob never enters the indexing here.
 */
static void
t2_aaaa_i_oovo_baba(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, double *abc, const double *t2,
    const double *i_oovo)
{
	const double *t2_p = &t2[i*oa*va*va];
	const double *i_oovo_p = &i_oovo[j*oa*vb*oa+k*vb*oa];
	(void)ob; /* unused */
	/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
	gemm('N', 'N', va*va, vb, oa, 1.0, t2_p, va*va,
	    i_oovo_p, oa, 0.0, abc, va*va);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2_abab(i,l,a,b), i_oovo_abab(j,k,c,l)).
 * t2_abab i-slab stride ob*va*vb; abab i_oovo strides: j -> ob*va*ob,
 * k -> va*ob.  The contraction runs over the beta occupied index (ob);
 * oa never enters the indexing here.
 */
static void
t2_abab_i_oovo_abab(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, double *abc, const double *t2,
    const double *i_oovo)
{
	const double *t2_p = &t2[i*ob*va*vb];
	const double *i_oovo_p = &i_oovo[j*ob*va*ob+k*va*ob];
	(void)oa; /* unused */
	/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
	gemm('N', 'N', va*vb, va, ob, 1.0, t2_p, va*vb,
	    i_oovo_p, ob, 0.0, abc, va*vb);
}
/*
 * out(i,j,k,a,b,c) = contract(l, t2_baba(i,l,a,b), i_oovo_aaaa(j,k,c,l)).
 * t2_baba i-slab stride oa*vb*va; aaaa i_oovo strides: j -> oa*va*oa,
 * k -> va*oa.  The contraction runs over the alpha occupied index (oa);
 * ob never enters the indexing here.
 */
static void
t2_baba_i_oovo_aaaa(size_t oa, size_t va, size_t ob, size_t vb,
    size_t i, size_t j, size_t k, double *abc, const double *t2,
    const double *i_oovo)
{
	const double *t2_p = &t2[i*oa*vb*va];
	const double *i_oovo_p = &i_oovo[j*oa*va*oa+k*va*oa];
	(void)ob; /* unused */
	/* out(i,j,k,a,b,c) = contract(l, t2(i,l,a,b), i_oovo(j,k,c,l)) */
	gemm('N', 'N', va*vb, va, oa, 1.0, t2_p, va*vb,
	    i_oovo_p, oa, 0.0, abc, va*vb);
}
/*
 * Disconnected-triples contribution for one (i,j,k,a,b,c) tuple:
 * the nine-term antisymmetrized sum of ov(x,y)*oovv(p,q,r,s) products
 * obtained by permuting (i,j,k) against (a,b,c).  ov is (o,v);
 * oovv is (o,o,v,v), both row-major.  Pure function of its arguments.
 */
static double
i_jk_a_bc_ov_oovv(size_t o, size_t v, const double *ov, const double *oovv,
    size_t i, size_t j, size_t k, size_t a, size_t b, size_t c)
{
	const size_t so = o*v*v;	/* first occupied stride */
	const size_t sv = v*v;		/* second occupied stride */
#define OOVV(p, q, r, s) oovv[(p)*so + (q)*sv + (r)*v + (s)]
	double sum;

	sum  = ov[i*v+a] * OOVV(j, k, b, c);
	sum -= ov[j*v+a] * OOVV(i, k, b, c);
	sum -= ov[k*v+a] * OOVV(j, i, b, c);
	sum -= ov[i*v+b] * OOVV(j, k, a, c);
	sum += ov[j*v+b] * OOVV(i, k, a, c);
	sum += ov[k*v+b] * OOVV(j, i, a, c);
	sum -= ov[i*v+c] * OOVV(j, k, b, a);
	sum += ov[j*v+c] * OOVV(i, k, b, a);
	sum += ov[k*v+c] * OOVV(j, i, b, a);
#undef OOVV
	return sum;
}
/*
 * One unsymmetrized disconnected-T3 term:
 *   t1(i,a)*i_oovv(j,k,b,c) + f_ov(i,a)*t2(j,k,b,c)
 * where t1/f_ov are (*,v1) and i_oovv/t2 are (*,o2,v2a,v2b), row-major.
 * Both products share the same two flat offsets, computed once.
 */
static double
comp_t3b_ijkabc(size_t v1, size_t o2, size_t v2a, size_t v2b,
    size_t i, size_t j, size_t k, size_t a, size_t b, size_t c,
    const double *t1, const double *i_oovv, const double *f_ov,
    const double *t2)
{
	const size_t p_ov = i*v1 + a;
	const size_t p_oovv = j*o2*v2a*v2b + k*v2a*v2b + b*v2b + c;

	return t1[p_ov]*i_oovv[p_oovv] + f_ov[p_ov]*t2[p_oovv];
}
/*
 * Perturbative-triples (T) energy, same-spin (aaa) block.
 *
 * The i<j<k occupied triples are dealt round-robin across MPI ranks
 * (when LIBPT_USE_MPI) and dynamically across OpenMP threads.  For each
 * triple the connected amplitude t3ax(a>b>c) is assembled from six
 * gemm-based partial contractions (three i_ovvv permutations, three
 * i_oovo permutations, antisymmetrized over a,b,c), the disconnected
 * part t3bx is formed pointwise, and t3ax*(t3ax-t3bx)/dn is accumulated.
 * Returns this rank's partial energy; the caller reduces over MPI.
 */
static double
cc_pt_aaa(size_t oa, size_t va, const double *d_ov, const double *f_ov,
    const double *t1, const double *t2_aaaa, const double *i_oovo_aaaa,
    const double *i_oovv_aaaa, const double *i_ovvv_aaaa)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	if (oa == 0 || va == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	/* Per-thread state: each thread builds its own triple list and
	 * scratch buffers; e_pt is combined by the omp reduction below. */
	size_t i, j, k, a, b, c, it, *ijk, nijk = 0;
	double *t3ax1, *abc1;
	if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	/* Enumerate i<j<k, keeping every size-th triple for this rank. */
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = j+1; k < oa; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Workspace: t3ax1 (va^3) for the accumulated amplitude, abc1
	 * (va^3) for each raw gemm output. */
	if ((t3ax1 = libpt_malloc(2*va*va*va*sizeof(*t3ax1))) == NULL)
		err(1, "libpt malloc work");
	abc1 = t3ax1 + va*va*va;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* i_ovvv contraction, (i,j,k) ordering; abc1 is indexed by
		 * a packed triangular pair index times va (half storage). */
		t2_i_ovvv_half(oa,va,i,j,k,abc1,t2_aaaa,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] =
			    +abc1[a*(a-1)/2*va+b*va+c]
			    -abc1[a*(a-1)/2*va+c*va+b]
			    +abc1[b*(b-1)/2*va+c*va+a];
		/* i_ovvv contraction, (i,k,j) permutation (sign flipped) */
		t2_i_ovvv_half(oa,va,i,k,j,abc1,t2_aaaa,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		/* i_ovvv contraction, (k,j,i) permutation (sign flipped) */
		t2_i_ovvv_half(oa,va,k,j,i,abc1,t2_aaaa,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		/* i_oovo contraction, (i,j,k); abc1 here is dense (a,b,c). */
		t2_i_oovo(oa,va,i,j,k,abc1,t2_aaaa,i_oovo_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    +abc1[a*va*va+b*va+c]
			    -abc1[b*va*va+a*va+c]
			    -abc1[c*va*va+b*va+a];
		/* i_oovo contraction, (j,i,k) permutation (sign flipped) */
		t2_i_oovo(oa,va,j,i,k,abc1,t2_aaaa,i_oovo_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++)
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		/* i_oovo contraction, (k,j,i); folded into the final energy
		 * accumulation loop to avoid another sweep over a>b>c. */
		t2_i_oovo(oa,va,k,j,i,abc1,t2_aaaa,i_oovo_aaaa);
		for (a = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < b; c++) {
			double t3ax, t3bx, dn;
			t3ax1[a*va*va+b*va+c] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
			/* orbital-energy denominator for (ia,jb,kc) */
			dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c];
			t3ax = t3ax1[a*va*va+b*va+c];
			/* disconnected part: t1*i_oovv + f_ov*t2 */
			t3bx = +i_jk_a_bc_ov_oovv(oa,va,t1,i_oovv_aaaa,i,j,k,a,b,c)
			    +i_jk_a_bc_ov_oovv(oa,va,f_ov,t2_aaaa,i,j,k,a,b,c);
			e_pt += t3ax * (t3ax-t3bx) / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(t3ax1);
	}
	return (e_pt);
}
/*
 * Perturbative-triples (T) energy, mixed-spin (aab) block: two alpha
 * occupied indices i<j and one beta index k.  Same rank/thread
 * distribution scheme as cc_pt_aaa; the amplitude is antisymmetrized
 * only over the alpha pair (a>b), c runs over all vb.
 * Returns this rank's partial energy; the caller reduces over MPI.
 */
static double
cc_pt_aab(size_t oa, size_t va, size_t ob, size_t vb,
    const double *d_ov_aa, const double *d_ov_bb,
    const double *f_ov_aa, const double *f_ov_bb,
    const double *t1_aa, const double *t1_bb,
    const double *t2_aaaa, const double *t2_abab, const double *t2_baba,
    const double *i_oovo_aaaa, const double *i_oovo_abab,
    const double *i_oovo_baba, const double *i_oovv_aaaa,
    const double *i_oovv_abab, const double *i_ovvv_aaaa,
    const double *i_ovvv_abab, const double *i_ovvv_baba)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	if (oa == 0 || va == 0 || ob == 0 || vb == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, it, *ijk, nijk = 0;
	double *t3ax1, *abc1, *abc11, *abc12;
	if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	/* Enumerate i<j (alpha) x k (beta), round-robin over ranks. */
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = 0; k < ob; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	if ((t3ax1 = libpt_malloc(2*va*va*vb*sizeof(*t3ax1))) == NULL)
		err(1, "libpt malloc work");
	/* abc1 and abc11 deliberately alias the same upper half of the
	 * workspace (they are never live at the same time); abc12 sits
	 * after the packed half-sized abc11 region. */
	abc1 = t3ax1 + va*va*vb;
	abc11 = t3ax1 + va*va*vb;
	abc12 = t3ax1 + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* i_ovvv-type contractions, each antisymmetrized over a>b */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i_ovvv_baba);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] =
			    -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i_ovvv_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i_ovvv_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		/* half-stored aaaa i_ovvv terms: two permutations into the
		 * two packed scratch buffers, combined in one sweep */
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i_ovvv_aaaa);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		/* i_oovo-type contractions */
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i_oovo_baba);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i_oovo_baba);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i_oovo_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i_oovo_abab);
		for (a = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++)
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		/* last contraction folded into the energy accumulation */
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i_oovo_aaaa);
		for (a = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < vb; c++) {
			double t3ax, t3bx, dn;
			t3ax1[a*va*vb+b*vb+c] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
			/* disconnected part, antisymmetrized over the alpha
			 * labels (i,j) and (a,b), plus the pure-alpha term */
			t3bx = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			    -comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			    -comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			    +comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
			    t1_aa,i_oovv_abab,f_ov_aa,t2_abab)
			    +comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
			    t1_bb,i_oovv_aaaa,f_ov_bb,t2_aaaa);
			dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c];
			t3ax = t3ax1[a*va*vb+b*vb+c];
			e_pt += t3ax * (t3ax-t3bx) / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(t3ax1);
	}
	return (e_pt);
}
/*
 * Restricted (closed-shell) (T) energy.  The packed input arrays hold
 * only the aaaa and abab spin blocks back to back; by spin symmetry the
 * beta blocks equal their alpha counterparts, so abab pointers are
 * reused wherever a baba block is expected, and the final factor 2.0
 * accounts for the mirror-image beta spin cases.
 */
double
libpt_rpt(size_t oa, size_t va, const double *d_ov, const double *f_ov,
    const double *t1, const double *t2, const double *i_oovo,
    const double *i_oovv, const double *i_ovvv)
{
	double e_pt = 0.0;
	/* slice the packed buffers: aaaa block first, then abab */
	const double *t2_aaaa = t2;
	const double *t2_abab = t2 + oa*oa*va*va;
	/* aaaa i_ovvv is half-stored (packed virtual pair), hence the
	 * (va-1)/2 factor in the offset */
	const double *i_ovvv_aaaa = i_ovvv;
	const double *i_ovvv_abab = i_ovvv + oa*va*va*(va-1)/2;
	const double *i_oovo_aaaa = i_oovo;
	const double *i_oovo_abab = i_oovo + oa*oa*oa*va;
	const double *i_oovv_aaaa = i_oovv;
	const double *i_oovv_abab = i_oovv + oa*oa*va*va;
	e_pt += cc_pt_aaa(oa, va, d_ov, f_ov, t1, t2_aaaa,
	    i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa);
	/* aab case with ob=oa, vb=va; abab doubles for baba */
	e_pt += cc_pt_aab(oa, va, oa, va, d_ov, d_ov, f_ov, f_ov, t1, t1,
	    t2_aaaa, t2_abab, t2_abab, i_oovo_aaaa, i_oovo_abab, i_oovo_abab,
	    i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab, i_ovvv_abab);
#ifdef LIBPT_USE_MPI
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
	    MPI_SUM, MPI_COMM_WORLD);
#endif
	return 2.0 * e_pt;
}
/*
 * Unrestricted (T) energy.  Each input buffer packs its spin blocks
 * contiguously (aaaa, abab, bbbb, baba for the four-index arrays; aa
 * then bb for the two-index ones); the pointers below just slice them.
 * The total is the sum of the aaa, bbb, aab and bba spin cases, the
 * last obtained by swapping the alpha/beta roles of every argument.
 */
double
libpt_upt(size_t oa, size_t va, size_t ob, size_t vb, const double *d_ov,
    const double *f_ov, const double *t1, const double *t2,
    const double *i_oovo, const double *i_oovv, const double *i_ovvv)
{
	double e_pt = 0.0;
	const double *d_ov_aa = d_ov;
	const double *d_ov_bb = d_ov_aa + oa*va;
	const double *f_ov_aa = f_ov;
	const double *f_ov_bb = f_ov_aa + oa*va;
	const double *t1_aa = t1;
	const double *t1_bb = t1_aa + oa*va;
	const double *t2_aaaa = t2;
	const double *t2_abab = t2_aaaa + oa*oa*va*va;
	const double *t2_bbbb = t2_abab + oa*ob*va*vb;
	const double *t2_baba = t2_bbbb + ob*ob*vb*vb;
	const double *i_oovo_aaaa = i_oovo;
	const double *i_oovo_abab = i_oovo_aaaa + oa*oa*va*oa;
	const double *i_oovo_bbbb = i_oovo_abab + oa*ob*va*ob;
	const double *i_oovo_baba = i_oovo_bbbb + ob*ob*vb*ob;
	const double *i_oovv_aaaa = i_oovv;
	const double *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
	const double *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
	const double *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
	/* same-spin i_ovvv blocks are half-stored: packed virtual pair */
	const double *i_ovvv_aaaa = i_ovvv;
	const double *i_ovvv_abab = i_ovvv_aaaa + oa*va*va*(va-1)/2;
	const double *i_ovvv_bbbb = i_ovvv_abab + oa*vb*va*vb;
	const double *i_ovvv_baba = i_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
	/* aaaaaa */
	e_pt += cc_pt_aaa(oa, va, d_ov_aa, f_ov_aa, t1_aa, t2_aaaa,
	    i_oovo_aaaa, i_oovv_aaaa, i_ovvv_aaaa);
	/* bbbbbb */
	e_pt += cc_pt_aaa(ob, vb, d_ov_bb, f_ov_bb, t1_bb, t2_bbbb,
	    i_oovo_bbbb, i_oovv_bbbb, i_ovvv_bbbb);
	/* aabaab */
	e_pt += cc_pt_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f_ov_aa, f_ov_bb,
	    t1_aa, t1_bb, t2_aaaa, t2_abab, t2_baba, i_oovo_aaaa, i_oovo_abab,
	    i_oovo_baba, i_oovv_aaaa, i_oovv_abab, i_ovvv_aaaa, i_ovvv_abab,
	    i_ovvv_baba);
	/* bbabba: alpha and beta swapped throughout */
	e_pt += cc_pt_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f_ov_bb, f_ov_aa,
	    t1_bb, t1_aa, t2_bbbb, t2_baba, t2_abab, i_oovo_bbbb, i_oovo_baba,
	    i_oovo_abab, i_oovv_bbbb, i_oovv_baba, i_ovvv_bbbb, i_ovvv_baba,
	    i_ovvv_abab);
#ifdef LIBPT_USE_MPI
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
	    MPI_SUM, MPI_COMM_WORLD);
#endif
	return e_pt;
}
/*
 * Lambda-based (T) correction, same-spin (aaa) block.  Mirrors
 * cc_pt_aaa, but builds two triples vectors per (i,j,k): a left one
 * from the lambda amplitudes (sigvvvl, from l2 with i6/i7
 * intermediates) and a right one from the t2 amplitudes (sigvvvr, with
 * i2/i3 intermediates), accumulating (sigvvvl - l1t) * sigvvvr / dn.
 * Both vectors are stored packed over a>b>c via the running counter t.
 * Returns this rank's partial energy; the caller reduces over MPI.
 */
static double
cc_ft_aaa(size_t oa, size_t va, const double *d_ov, const double *f2_ov,
    const double *l1, const double *t2, const double *l2, const double *i_oovv,
    const double *i2_t2f2_oovo, const double *i3_ovvv, const double *i6_oovo,
    const double *i7_ovvv)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	if (oa == 0 || va == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
	double *sigvvvl, *sigvvvr, *abc1;
	if ((ijk = libpt_malloc(oa*oa*oa*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	/* Enumerate i<j<k, keeping every size-th triple for this rank. */
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = j+1; k < oa; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Workspace carved from one allocation: sigvvvl and sigvvvr only
	 * need C(va,3) entries each, so both fit below abc1 at va^3. */
	if ((sigvvvl = libpt_malloc(2*va*va*va*sizeof(*sigvvvl))) == NULL)
		err(1, "libpt malloc work");
	sigvvvr = sigvvvl + va*va*(va-1)/2;
	abc1 = sigvvvl + va*va*va;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* left (lambda) vector: three i7_ovvv permutations ... */
		t2_i_ovvv_half(oa,va,i,j,k,abc1,l2,i7_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] =
			    +abc1[a*(a-1)/2*va+b*va+c]
			    -abc1[a*(a-1)/2*va+c*va+b]
			    +abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,k,j,i,abc1,l2,i7_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,i,k,j,abc1,l2,i7_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		/* ... and three i6_oovo permutations */
		t2_i_oovo(oa,va,i,j,k,abc1,l2,i6_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    +abc1[a*va*va+b*va+c]
			    -abc1[b*va*va+a*va+c]
			    -abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,j,i,k,abc1,l2,i6_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,k,j,i,abc1,l2,i6_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvl[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		/* right (t2) vector: same permutation pattern with the
		 * i3_ovvv / i2_t2f2_oovo intermediates */
		t2_i_ovvv_half(oa,va,i,j,k,abc1,t2,i3_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] =
			    +abc1[a*(a-1)/2*va+b*va+c]
			    -abc1[a*(a-1)/2*va+c*va+b]
			    +abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,k,j,i,abc1,t2,i3_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_ovvv_half(oa,va,i,k,j,abc1,t2,i3_ovvv);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    -abc1[a*(a-1)/2*va+b*va+c]
			    +abc1[a*(a-1)/2*va+c*va+b]
			    -abc1[b*(b-1)/2*va+c*va+a];
		t2_i_oovo(oa,va,i,j,k,abc1,t2,i2_t2f2_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    +abc1[a*va*va+b*va+c]
			    -abc1[b*va*va+a*va+c]
			    -abc1[c*va*va+b*va+a];
		t2_i_oovo(oa,va,j,i,k,abc1,t2,i2_t2f2_oovo);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < b; c++, t++)
			sigvvvr[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
		/* last contraction folded into the energy accumulation */
		t2_i_oovo(oa,va,k,j,i,abc1,t2,i2_t2f2_oovo);
		for (a = 0, t = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < b; c++, t++) {
			double dn, l1t;
			sigvvvr[t] +=
			    -abc1[a*va*va+b*va+c]
			    +abc1[b*va*va+a*va+c]
			    +abc1[c*va*va+b*va+a];
			dn = d_ov[i*va+a] + d_ov[j*va+b] + d_ov[k*va+c];
			/* disconnected lambda part: l1*i_oovv + f2*l2 */
			l1t = +i_jk_a_bc_ov_oovv(oa,va,l1,i_oovv,i,j,k,a,b,c)
			    +i_jk_a_bc_ov_oovv(oa,va,f2_ov,l2,i,j,k,a,b,c);
			e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(sigvvvl);
	}
	return (e_pt);
}
/*
 * Lambda-based (T) correction, mixed-spin (aab) block: two alpha
 * occupied indices i<j and one beta index k.  Mirrors cc_pt_aab, but
 * builds a left vector sigvvvl from the lambda amplitudes (l2 with
 * i6/i7 intermediates) and a right vector sigvvvr from t2 (with i2/i3
 * intermediates), accumulating (sigvvvl - l1t) * sigvvvr / dn.  Both
 * vectors are packed over (a>b, c) via the running counter t.
 * Returns this rank's partial energy; the caller reduces over MPI.
 */
static double
cc_ft_aab(size_t oa, size_t va, size_t ob, size_t vb,
    const double *d_ov_aa, const double *d_ov_bb,
    const double *f2_ov_aa, const double *f2_ov_bb,
    const double *l1_aa, const double *l1_bb,
    const double *t2_aaaa, const double *t2_abab, const double *t2_baba,
    const double *l2_aaaa, const double *l2_abab, const double *l2_baba,
    const double *i_oovv_aaaa, const double *i_oovv_abab,
    const double *i2_t2f2_oovo_aaaa, const double *i2_t2f2_oovo_abab,
    const double *i2_t2f2_oovo_baba,
    const double *i3_ovvv_aaaa, const double *i3_ovvv_abab,
    const double *i3_ovvv_baba,
    const double *i6_oovo_aaaa, const double *i6_oovo_abab,
    const double *i6_oovo_baba,
    const double *i7_ovvv_aaaa, const double *i7_ovvv_abab,
    const double *i7_ovvv_baba)
{
	double e_pt = 0.0;
	int rank = 0, size = 1;
	if (oa == 0 || va == 0 || ob == 0 || vb == 0)
		return 0.0;
#ifdef LIBPT_USE_MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif
#ifdef _OPENMP
#pragma omp parallel
#endif
	{
	size_t i, j, k, a, b, c, t, it, *ijk, nijk = 0;
	double *sigvvvl, *sigvvvr, *abc1, *abc11, *abc12;
	if ((ijk = libpt_malloc(2*oa*oa*ob*sizeof(*ijk))) == NULL)
		err(1, "libpt malloc ijk");
	/* Enumerate i<j (alpha) x k (beta), round-robin over ranks. */
	for (i = 0, it = 0; i < oa; i++) {
		for (j = i+1; j < oa; j++) {
			for (k = 0; k < ob; k++, it++) {
				if ((int)it % size == rank) {
					ijk[3*nijk+0] = i;
					ijk[3*nijk+1] = j;
					ijk[3*nijk+2] = k;
					nijk++;
				}
			}
		}
	}
	/* Workspace carved from one allocation; sigvvvl/sigvvvr are
	 * packed (vb*va*(va-1)/2 each), abc1/abc11 alias the upper half
	 * (never live simultaneously), abc12 follows abc11. */
	if ((sigvvvl = libpt_malloc(2*va*va*vb*sizeof(*sigvvvl))) == NULL)
		err(1, "libpt malloc work");
	sigvvvr = sigvvvl + vb*va*(va-1)/2;
	abc1 = sigvvvl + va*va*vb;
	abc11 = sigvvvl + va*va*vb;
	abc12 = sigvvvl + va*va*vb + vb*va*(va-1)/2;
#ifdef _OPENMP
#pragma omp for reduction(+:e_pt) schedule(dynamic)
#endif
	for (it = 0; it < nijk; it++) {
		i = ijk[3*it+0];
		j = ijk[3*it+1];
		k = ijk[3*it+2];
		/* left (lambda) vector: i7_ovvv-type contractions */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,l2_aaaa,i7_ovvv_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] =
			    -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,l2_abab,i7_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,l2_abab,i7_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,l2_baba,i7_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,l2_baba,i7_ovvv_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		/* left vector: i6_oovo-type contractions */
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,l2_aaaa,i6_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,l2_aaaa,i6_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,l2_abab,i6_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,l2_abab,i6_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,l2_baba,i6_oovo_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvl[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		/* right (t2) vector: same pattern with i3/i2 intermediates */
		t2_aaaa_i_ovvv_baba(oa,va,ob,vb,i,j,k,abc1,t2_aaaa,i3_ovvv_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] =
			    -abc1[a+b*va+c*va*va]
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,i,k,j,abc1,t2_abab,i3_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
		t2_abab_i_ovvv_abab(oa,va,ob,vb,j,k,i,abc1,t2_abab,i3_ovvv_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    +abc1[a+c*va+b*va*vb]
			    -abc1[b+c*va+a*va*vb];
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,j,i,abc11,t2_baba,i3_ovvv_aaaa);
		t2_baba_i_ovvv_aaaa_half(oa,va,ob,vb,k,i,j,abc12,t2_baba,i3_ovvv_aaaa);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc11[c+vb*a*(a-1)/2+vb*b]
			    +abc12[c+vb*a*(a-1)/2+vb*b];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,i,k,j,abc1,t2_aaaa,i2_t2f2_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[b+a*va+c*va*va];
		t2_aaaa_i_oovo_baba(oa,va,ob,vb,j,k,i,abc1,t2_aaaa,i2_t2f2_oovo_baba);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    +abc1[b+a*va+c*va*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,i,j,k,abc1,t2_abab,i2_t2f2_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[c+a*vb+b*vb*va]
			    +abc1[c+b*vb+a*vb*va];
		t2_abab_i_oovo_abab(oa,va,ob,vb,j,i,k,abc1,t2_abab,i2_t2f2_oovo_abab);
		for (a = 0, t = 0; a < va; a++)
		for (b = 0; b < a; b++)
		for (c = 0; c < vb; c++, t++)
			sigvvvr[t] +=
			    -abc1[c+b*vb+a*vb*va]
			    +abc1[c+a*vb+b*vb*va];
		/* last contraction folded into the energy accumulation */
		t2_baba_i_oovo_aaaa(oa,va,ob,vb,k,j,i,abc1,t2_baba,i2_t2f2_oovo_aaaa);
		for (a = 0, t = 0; a < va; a++) {
		for (b = 0; b < a; b++) {
		for (c = 0; c < vb; c++, t++) {
			double dn, l1t;
			sigvvvr[t] +=
			    -abc1[a+c*va+b*va*vb]
			    +abc1[b+c*va+a*va*vb];
			/* disconnected lambda part, antisymmetrized over the
			 * alpha labels, plus the pure-alpha term */
			l1t = +comp_t3b_ijkabc(va,ob,va,vb,i,j,k,a,b,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			    -comp_t3b_ijkabc(va,ob,va,vb,i,j,k,b,a,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			    -comp_t3b_ijkabc(va,ob,va,vb,j,i,k,a,b,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			    +comp_t3b_ijkabc(va,ob,va,vb,j,i,k,b,a,c,
			    l1_aa,i_oovv_abab,f2_ov_aa,l2_abab)
			    +comp_t3b_ijkabc(vb,oa,va,va,k,j,i,c,b,a,
			    l1_bb,i_oovv_aaaa,f2_ov_bb,l2_aaaa);
			dn = d_ov_aa[i*va+a] + d_ov_aa[j*va+b] + d_ov_bb[k*vb+c];
			e_pt += (sigvvvl[t] - l1t) * sigvvvr[t] / dn;
		}}}
	}
	libpt_free(ijk);
	libpt_free(sigvvvl);
	}
	return (e_pt);
}
/*
 * Restricted (closed-shell) lambda-based (T) correction.  As in
 * libpt_rpt, the packed inputs hold only aaaa and abab spin blocks;
 * abab pointers stand in for the baba blocks by spin symmetry, and the
 * final factor 2.0 accounts for the mirror-image beta spin cases.
 */
double
libpt_rft(size_t oa, size_t va, const double *d_ov, const double *f2_ov,
    const double *l1, const double *t2, const double *l2, const double *i_oovv,
    const double *i2_t2f2_oovo, const double *i3_ovvv, const double *i6_oovo,
    const double *i7_ovvv)
{
	double e_pt = 0.0;
	/* slice the packed buffers: aaaa block first, then abab */
	const double *t2_aaaa = t2;
	const double *t2_abab = t2 + oa*oa*va*va;
	const double *l2_aaaa = l2;
	const double *l2_abab = l2 + oa*oa*va*va;
	const double *i_oovv_aaaa = i_oovv;
	const double *i_oovv_abab = i_oovv + oa*oa*va*va;
	const double *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
	const double *i2_t2f2_oovo_abab = i2_t2f2_oovo + oa*oa*oa*va;
	/* same-spin ovvv intermediates are half-stored (packed pair) */
	const double *i3_ovvv_aaaa = i3_ovvv;
	const double *i3_ovvv_abab = i3_ovvv + oa*va*va*(va-1)/2;
	const double *i6_oovo_aaaa = i6_oovo;
	const double *i6_oovo_abab = i6_oovo + oa*oa*oa*va;
	const double *i7_ovvv_aaaa = i7_ovvv;
	const double *i7_ovvv_abab = i7_ovvv + oa*va*va*(va-1)/2;
	e_pt += cc_ft_aaa(oa, va, d_ov, f2_ov, l1, t2_aaaa, l2_aaaa,
	    i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
	    i7_ovvv_aaaa);
	/* aab case with ob=oa, vb=va; abab doubles for baba */
	e_pt += cc_ft_aab(oa, va, oa, va, d_ov, d_ov, f2_ov, f2_ov,
	    l1, l1, t2_aaaa, t2_abab, t2_abab, l2_aaaa, l2_abab, l2_abab,
	    i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
	    i2_t2f2_oovo_abab, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_abab,
	    i6_oovo_aaaa, i6_oovo_abab, i6_oovo_abab,
	    i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
	    MPI_SUM, MPI_COMM_WORLD);
#endif
	return 2.0 * e_pt;
}
/*
 * Unrestricted lambda-based (T) correction.  Each input buffer packs
 * its spin blocks contiguously (aaaa, abab, bbbb, baba for four-index
 * arrays; aa then bb for two-index ones); the pointers below slice
 * them.  Total is the sum of aaa, bbb, aab and bba spin cases, the
 * last obtained by swapping the alpha/beta roles of every argument.
 */
double
libpt_uft(size_t oa, size_t va, size_t ob, size_t vb, const double *d_ov,
    const double *f2_ov, const double *l1, const double *t2, const double *l2,
    const double *i_oovv, const double *i2_t2f2_oovo, const double *i3_ovvv,
    const double *i6_oovo, const double *i7_ovvv)
{
	double e_pt = 0.0;
	const double *d_ov_aa = d_ov;
	const double *d_ov_bb = d_ov_aa + oa*va;
	const double *f2_ov_aa = f2_ov;
	const double *f2_ov_bb = f2_ov_aa + oa*va;
	const double *l1_aa = l1;
	const double *l1_bb = l1_aa + oa*va;
	const double *t2_aaaa = t2;
	const double *t2_abab = t2_aaaa + oa*oa*va*va;
	const double *t2_bbbb = t2_abab + oa*ob*va*vb;
	const double *t2_baba = t2_bbbb + ob*ob*vb*vb;
	const double *l2_aaaa = l2;
	const double *l2_abab = l2_aaaa + oa*oa*va*va;
	const double *l2_bbbb = l2_abab + oa*ob*va*vb;
	const double *l2_baba = l2_bbbb + ob*ob*vb*vb;
	const double *i_oovv_aaaa = i_oovv;
	const double *i_oovv_abab = i_oovv_aaaa + oa*oa*va*va;
	const double *i_oovv_bbbb = i_oovv_abab + oa*ob*va*vb;
	const double *i_oovv_baba = i_oovv_bbbb + ob*ob*vb*vb;
	const double *i2_t2f2_oovo_aaaa = i2_t2f2_oovo;
	const double *i2_t2f2_oovo_abab = i2_t2f2_oovo_aaaa + oa*oa*va*oa;
	const double *i2_t2f2_oovo_bbbb = i2_t2f2_oovo_abab + oa*ob*va*ob;
	const double *i2_t2f2_oovo_baba = i2_t2f2_oovo_bbbb + ob*ob*vb*ob;
	/* same-spin ovvv intermediates are half-stored (packed pair) */
	const double *i3_ovvv_aaaa = i3_ovvv;
	const double *i3_ovvv_abab = i3_ovvv_aaaa + oa*va*va*(va-1)/2;
	const double *i3_ovvv_bbbb = i3_ovvv_abab + oa*vb*va*vb;
	const double *i3_ovvv_baba = i3_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
	const double *i6_oovo_aaaa = i6_oovo;
	const double *i6_oovo_abab = i6_oovo_aaaa + oa*oa*va*oa;
	const double *i6_oovo_bbbb = i6_oovo_abab + oa*ob*va*ob;
	const double *i6_oovo_baba = i6_oovo_bbbb + ob*ob*vb*ob;
	const double *i7_ovvv_aaaa = i7_ovvv;
	const double *i7_ovvv_abab = i7_ovvv_aaaa + oa*va*va*(va-1)/2;
	const double *i7_ovvv_bbbb = i7_ovvv_abab + oa*vb*va*vb;
	const double *i7_ovvv_baba = i7_ovvv_bbbb + ob*vb*vb*(vb-1)/2;
	/* aaaaaa */
	e_pt += cc_ft_aaa(oa, va, d_ov_aa, f2_ov_aa, l1_aa, t2_aaaa, l2_aaaa,
	    i_oovv_aaaa, i2_t2f2_oovo_aaaa, i3_ovvv_aaaa, i6_oovo_aaaa,
	    i7_ovvv_aaaa);
	/* bbbbbb */
	e_pt += cc_ft_aaa(ob, vb, d_ov_bb, f2_ov_bb, l1_bb, t2_bbbb, l2_bbbb,
	    i_oovv_bbbb, i2_t2f2_oovo_bbbb, i3_ovvv_bbbb, i6_oovo_bbbb,
	    i7_ovvv_bbbb);
	/* aabaab */
	e_pt += cc_ft_aab(oa, va, ob, vb, d_ov_aa, d_ov_bb, f2_ov_aa, f2_ov_bb,
	    l1_aa, l1_bb, t2_aaaa, t2_abab, t2_baba, l2_aaaa, l2_abab, l2_baba,
	    i_oovv_aaaa, i_oovv_abab, i2_t2f2_oovo_aaaa, i2_t2f2_oovo_abab,
	    i2_t2f2_oovo_baba, i3_ovvv_aaaa, i3_ovvv_abab, i3_ovvv_baba,
	    i6_oovo_aaaa, i6_oovo_abab, i6_oovo_baba,
	    i7_ovvv_aaaa, i7_ovvv_abab, i7_ovvv_baba);
	/* bbabba: alpha and beta swapped throughout */
	e_pt += cc_ft_aab(ob, vb, oa, va, d_ov_bb, d_ov_aa, f2_ov_bb, f2_ov_aa,
	    l1_bb, l1_aa, t2_bbbb, t2_baba, t2_abab, l2_bbbb, l2_baba, l2_abab,
	    i_oovv_bbbb, i_oovv_baba, i2_t2f2_oovo_bbbb, i2_t2f2_oovo_baba,
	    i2_t2f2_oovo_abab, i3_ovvv_bbbb, i3_ovvv_baba, i3_ovvv_abab,
	    i6_oovo_bbbb, i6_oovo_baba, i6_oovo_abab,
	    i7_ovvv_bbbb, i7_ovvv_baba, i7_ovvv_abab);
#ifdef LIBPT_USE_MPI
	MPI_Allreduce(MPI_IN_PLACE, &e_pt, 1, MPI_DOUBLE,
	    MPI_SUM, MPI_COMM_WORLD);
#endif
	return e_pt;
}
/* ===== record_linker.c ===== */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "record_linker.h"
#include "jarowinkler.h"
#include "name_dict.h"
/* Matching criteria: set once from argv in main(), read globally. */
unsigned char year1, year2;        /* record years of file 1 / file 2 (used for the year offset when matching) */
unsigned char min_age1, max_age1;  /* accepted age range for file 1 */
unsigned char min_age2, max_age2;  /* accepted age range for file 2 */
char sex_global;                   /* sex to match; records with sex 'U' are always accepted */
/*
 * Record linker driver: extract valid entries from the two data files
 * (in parallel via OpenMP tasks), standardize first names against a
 * dictionary, find matches, and write them to the output file.
 * Expects 11 arguments (see run.sh): data1 data2 std_names year1 year2
 * sex min_age1 max_age1 min_age2 max_age2 output.
 */
int main(int argc, char *argv[]) {
	char *data1, *data2; // data filenames
	char *std_names; // std_names filenames
	char *output;
	entry_t *entries1, *entries2;
	int count1, count2;
	name_dict_t *name_dict;
	match_t *matches;
	int rc=0;
#ifdef _OPENMP
	double wtime;
#endif
	// Parse arguments
	if (argc != 12) {
		fprintf(stderr, "Please use the run.sh script.\n");
		return -1;
	}
	data1 = argv[1]; data2 = argv[2];
	std_names = argv[3];
	year1 = atoi(argv[4]); year2 = atoi(argv[5]);
	sex_global = argv[6][0];
	min_age1 = atoi(argv[7]); max_age1 = atoi(argv[8]);
	min_age2 = atoi(argv[9]); max_age2 = atoi(argv[10]);
	output = argv[11];
	/* Extract valid entries from data files.  The two files are read
	 * concurrently as OpenMP tasks; entries1/entries2 and the counts
	 * are shared with the tasks and are safe to read after the
	 * parallel region's closing barrier. */
	fprintf(stderr, "Extracting data...\n");
#ifdef _OPENMP
	wtime = omp_get_wtime();
#endif
#pragma omp parallel
#pragma omp single nowait
	{
		// Extract first data file
#pragma omp task
		{
			entries1 = extract_valid_entries(data1, &count1, 1);
			fprintf(stderr, " Found %d valid entries in data 1\n", count1);
		}
		// Extract second data file
#pragma omp task
		{
			entries2 = extract_valid_entries(data2, &count2, 2);
			fprintf(stderr, " Found %d valid entries in data 2\n", count2);
		}
	}
#ifdef _OPENMP
	fprintf(stderr, "Took %lf seconds\n", omp_get_wtime() - wtime);
#endif
	if (!entries1 || !entries2) EXIT_WITH_ERROR("could not extract valid entries");
	/* Generate name dictionary */
	name_dict = generate_name_dict(std_names);
	if (name_dict == NULL) EXIT_WITH_ERROR("could not generate name dictionary");
	/* Standardize fnames, one task per entry list.
	 * NOTE(review): rc is not referenced inside this parallel region,
	 * so the private(rc) clause looks redundant — confirm. */
	fprintf(stderr, "Standardizing names...\n");
#ifdef _OPENMP
	wtime = omp_get_wtime();
#endif
#pragma omp parallel private(rc)
#pragma omp single nowait
	{
		// Standardize names in first entry list
#pragma omp task
		{
			standardize_fnames(entries1, name_dict);
			fprintf(stderr, " Standardized names 1\n");
		}
		// Standardize names in second entry list
#pragma omp task
		{
			standardize_fnames(entries2, name_dict);
			fprintf(stderr, " Standardized names 2\n");
		}
	}
#ifdef _OPENMP
	fprintf(stderr, "Took %lf seconds\n", omp_get_wtime() - wtime);
#endif
	/* Free name dictionary */
	free_name_dict(name_dict);
#ifdef PRINT
	/* Print valid entries from each list */
	printf("Printing valid entries from file 1:\n");
	print_entries(entries1);
	printf("There were %d extracted entries.\n\n", count1);
	printf("Printing valid entries from file 2:\n");
	print_entries(entries2);
	printf("There were %d extracted entries.\n\n", count2);
#endif
	/* Find matches */
	fprintf(stderr, "Finding matches...\n");
#ifdef _OPENMP
	wtime = omp_get_wtime();
#endif
	matches = find_matches(entries1, entries2, count1);
#ifdef _OPENMP
	fprintf(stderr, "Took %lf seconds\n", omp_get_wtime() - wtime);
#endif
	/* Write matches */
	fprintf(stderr, "Writing matches to output file '%s'...\n", output);
#ifdef _OPENMP
	wtime = omp_get_wtime();
#endif
	rc = write_matches(output, matches);
	if (rc == -1) EXIT_WITH_ERROR("could not write output");
#ifdef _OPENMP
	fprintf(stderr, "Took %lf seconds\n", omp_get_wtime() - wtime);
#endif
	/* Free data */
	free_entries(entries1); free_entries(entries2);
	free_matches(matches);
	return 0;
} // main
/*
 * Extracts entries from a semicolon-separated data file, keeping only
 * records matching the global sex criterion and the age range
 * configured for file `id` (1 or 2).  Expected columns per line:
 *   RecID;Sex;Age;Std_Par;Pname;Sname
 * Returns a list with a dummy head node (first real entry is
 * ret->next), or NULL on error; *count receives the number of kept
 * entries.  The caller owns the list and frees it with free_entries().
 */

/* True if a field begins with a placeholder marking a missing value. */
static int field_missing(char c) {
	return c == '.' || c == '_' || c == '-' || c == '?' || c == '[';
}

/* Free a partially built entry, including any strings already attached
 * (free(NULL) is a no-op). */
static void discard_entry(entry_t *e) {
	free(e->bp);
	free(e->fname);
	free(e->lname);
	free(e);
}

entry_t *extract_valid_entries(char *filename, int *count, int id) {
	FILE *fp;
	char buf[4096];
	char *col;
	entry_t *entries, *cur;
	int min_age, max_age;
	char *saveptr; // strtok_r state
	unsigned int recID;
	char sex;
	unsigned char age;
	size_t len;
	// Open data file
	if ((fp = fopen(filename, "r")) == NULL) {
		perror("extract:fopen");
		return NULL;
	}
	// Get age range for the requested file id
	if (id == 1) {
		min_age = min_age1;
		max_age = max_age1;
	} else if (id == 2) {
		min_age = min_age2;
		max_age = max_age2;
	} else {
		fprintf(stderr, "%s:%d: cannot read more than two files\n",
			__FILE__, __LINE__);
		fclose(fp); // BUG FIX: stream used to leak on a bad id
		return NULL;
	}
	// Dummy head node simplifies appending
	cur = entries = malloc(sizeof(entry_t));
	if (entries == NULL) {
		fclose(fp);
		return NULL;
	}
	cur->fname = cur->lname = NULL; cur->bp = NULL;
	cur->next = NULL;
	*count = 0;
	// Store valid entries
	while (fgets(buf, sizeof(buf), fp)) {
		// RecID
		col = strtok_r(buf, ";", &saveptr);
		if (col == NULL) continue; // blank/malformed line
		recID = atoi(col);
		// Sex: accept the requested sex and 'U' (unknown)
		col = strtok_r(NULL, ";", &saveptr);
		if (col == NULL) continue;
		sex = col[0];
		if ((sex != sex_global) && (sex != 'U')) continue;
		// Age
		col = strtok_r(NULL, ";", &saveptr);
		if (col == NULL) continue;
		age = atoi(col);
		if ((age < min_age) || (age > max_age)) continue;
		// is valid entry
		entry_t *new_entry;
		new_entry = malloc(sizeof(entry_t));
		if (new_entry == NULL) break; // OOM: return what we have
		new_entry->recID = recID;
		new_entry->sex = sex;
		new_entry->age = age;
		new_entry->fname = NULL;
		new_entry->lname = NULL;
		new_entry->bp = NULL;
		new_entry->next = NULL;
		// Std_Par (birthplace)
		col = strtok_r(NULL, ";", &saveptr);
		if (col == NULL || field_missing(col[0])) {
			discard_entry(new_entry);
			continue;
		}
		len = strlen(col);
		new_entry->bp = malloc(len+1);
		if (new_entry->bp == NULL) { discard_entry(new_entry); break; }
		strcpy(new_entry->bp, col);
		// Pname (first name)
		col = strtok_r(NULL, ";", &saveptr);
		if (col == NULL || field_missing(col[0])) {
			// BUG FIX: bp used to leak on this path
			discard_entry(new_entry);
			continue;
		}
		len = strlen(col);
		new_entry->fname = malloc(len+1);
		if (new_entry->fname == NULL) { discard_entry(new_entry); break; }
		strcpy(new_entry->fname, col);
		// Sname (last name): final column, carries the line terminator
		col = strtok_r(NULL, ";", &saveptr);
		if (col == NULL || field_missing(col[0])) {
			// BUG FIX: bp and fname used to leak on this path
			discard_entry(new_entry);
			continue;
		}
		len = strlen(col);
		/* BUG FIX: the old code malloc'd len bytes then strcpy'd
		 * len+1 bytes into them (one-byte heap overflow).  Strip the
		 * trailing newline explicitly and copy within bounds; this
		 * also stops the last character being eaten when the final
		 * line has no newline. */
		if (len > 0 && col[len-1] == '\n')
			len--;
		if (len > 0 && col[len-1] == '\r') // tolerate CRLF input
			len--;
		new_entry->lname = malloc(len+1);
		if (new_entry->lname == NULL) { discard_entry(new_entry); break; }
		memcpy(new_entry->lname, col, len);
		new_entry->lname[len] = '\0';
		cur->next = new_entry;
		cur = cur->next;
		(*count)++;
	}
	fclose(fp);
	return entries;
} // extract_valid_entries
/* Standardize first names.
 * For each entry whose first name matches a dictionary entry, replaces
 * entry->fname with the dictionary's standardized spelling. Both lists
 * carry a dummy head node. Entries without a dictionary match (or where
 * reallocation fails) keep their original name. */
void standardize_fnames(entry_t *entries, name_dict_t *name_dict) {
    name_dict_t *head = name_dict;
    char *tmp;
    int len;
    // check each entry against name dictionary
    while (entries->next) {
        entries = entries->next;
        name_dict = head->next;   // dictionary also has a dummy head
        while (name_dict) {
            if (strcmp(entries->fname, name_dict->fname) == 0) {
                // copy standardized fname
                len = strlen(name_dict->fname_std);
                // BUG FIX: assigning realloc's result straight to
                // entries->fname leaked the old buffer (and left a NULL
                // fname for later strcmp calls) when realloc failed; on
                // failure keep the still-valid original name instead.
                if ((tmp = realloc(entries->fname, len + 1)) == NULL) break;
                entries->fname = tmp;
                strcpy(entries->fname, name_dict->fname_std);
                break;   // first dictionary hit wins
            }
            name_dict = name_dict->next;
        }
    }
    return;
} // standardize_fnames
/* Find matches between two entry lists using age and JW (Jaro-Winkler)
 * distance.
 *
 * entries2 is scanned parish-by-parish: the inner skip loop assumes both
 * lists are ordered by birth parish (bp), so each entries1 node is only
 * compared against the run of entries2 nodes sharing its parish.
 * Returns a newly allocated list (with dummy head) of match_t nodes that
 * point into the input lists -- free the result with free_matches()
 * BEFORE freeing the entry lists.
 *
 * NOTE(review): with OpenMP, each thread skips iters*tid entries and then
 * processes iters = count/n_threads of them, so when count is not evenly
 * divisible the last (count % n_threads) entries are never examined --
 * confirm this truncation is intentional.
 * NOTE(review): the age test is one-sided (only rejects when entry1 aged
 * more than expected+3); confirm younger-than-expected entry2 records are
 * meant to pass. */
match_t *find_matches(entry_t *entries1, entry_t *entries2, int count) {
    match_t *ret, *cur_ret;
    entry_t *cur1, *cur2, *start_bp;
    // expected aging between the two census years
    int diff = (year2 > year1) ? (year2-year1) : (year1-year2);
    // dummy head node for the result list
    cur_ret = ret = malloc(sizeof(match_t));
    ret->entry1 = NULL; ret->entry2 = NULL;
    ret->next = NULL;
    cur1 = entries1->next;
    cur2 = entries2->next;
    start_bp = cur2;   // first entries2 node of the current parish run
    fprintf(stderr, "INIT: %d %s %s %c %d %s\n", cur2->recID, cur2->fname, cur2->lname, cur2->sex, cur2->age, cur2->bp);
    // cur1/cur2/start_bp are firstprivate: every thread gets its own cursor
    // copies; cur_ret stays shared and is only advanced inside the critical.
    #pragma omp parallel firstprivate(cur1, cur2, start_bp)
    {
    #ifdef _OPENMP
    int n_threads = omp_get_num_threads();
    int tid = omp_get_thread_num();
    int iters = count / n_threads;   // entries1 nodes per thread
    int n=1, m=0;
    int i;
    // advance this thread's cur1 cursor to the start of its slice
    for (i = 0; i < iters * tid; i++) {
    cur1 = cur1->next;
    m++;
    }
    #pragma omp single
    {
    fprintf(stderr, " Using %d threads...\n", n_threads);
    }
    fprintf(stderr, "%d: starting at entry %d/%d\n", tid, m, count);
    #endif
    #pragma omp barrier
    // for each entry1 in this thread's slice
    while (cur1) {
    #ifdef _OPENMP
    if (n % 10000 == 0) fprintf(stderr, "%d: iter %d/%d is %d\n", tid, n, iters, cur1->recID);
    if (n++ > iters) break;   // slice exhausted
    #endif
    //fprintf(stderr, "%d %s %s %c %d %s\n", cur1->recID, cur1->fname, cur1->lname, cur1->sex, cur1->age, cur1->bp);
    // advance cur2 to the first entries2 node of cur1's parish
    while (cur2 && (strcmp(cur1->bp, cur2->bp) > 0)) {
    cur2 = cur2->next;
    start_bp = cur2;
    }
    //fprintf(stderr, "start_bp = %d %s %s %c %d %s\n", start_bp->recID, start_bp->fname, start_bp->lname, start_bp->sex, start_bp->age, start_bp->bp);
    // check each entry2 with same parish
    while (cur2 && (strcmp(cur1->bp, cur2->bp) == 0)) {
    // age criteria: reject if entry1 aged more than expected + 3 years
    if ((cur1->age + diff - cur2->age) > 3) {
    cur2 = cur2->next;
    continue;
    }
    // jarowinkler criteria: all three fields within 0.2 of a perfect match
    if ((1-jarowinkler(cur1->fname, cur2->fname) > 0.2) ||
    (1-jarowinkler(cur1->lname, cur2->lname) > 0.2) ||
    (1-jarowinkler( cur1->bp, cur2->bp) > 0.2)) {
    cur2 = cur2->next;
    continue;
    }
    //fprintf(stderr, "%d %s %s %c %d %s MATCHES %d %s %s %c %d %s\n", cur1->recID, cur1->fname, cur1->lname, cur1->sex, cur1->age, cur1->bp, cur2->recID, cur2->fname, cur2->lname, cur2->sex, cur2->age, cur2->bp);
    // save match; linking into the shared result list must be serialized
    match_t *new_match = malloc(sizeof(match_t));
    new_match->entry1 = cur1;
    new_match->entry2 = cur2;
    new_match->next = NULL;
    #pragma omp critical
    {
    cur_ret->next = new_match;
    cur_ret = cur_ret->next;
    }
    cur2 = cur2->next;
    }
    // reset entry2 pointer to starting parish for the next cur1
    cur2 = start_bp;
    cur1 = cur1->next;
    }
    }
    return ret;
} // find_matches
/* Free entry_t list.
 * Releases every node in the list (including the dummy head) together
 * with the three strings each node owns; free(NULL) is a harmless no-op
 * for nodes whose strings were never set. */
void free_entries(entry_t *entries) {
    entry_t *next;
    for (entry_t *node = entries; node != NULL; node = next) {
        next = node->next;
        free(node->fname);
        free(node->lname);
        free(node->bp);
        free(node);
    }
}
/* Free match_t list.
 * Releases every node including the dummy head. The entry_t objects a
 * match points at are owned by the entry lists and are not touched. */
void free_matches(match_t *matches) {
    match_t *next;
    while (matches != NULL) {
        next = matches->next;
        free(matches);
        matches = next;
    }
}
#ifdef PRINT
/* Print the contents of an entry list to stdout, one record per line,
 * skipping the dummy head node. */
void print_entries(entry_t *entries) {
    for (entry_t *e = entries->next; e != NULL; e = e->next) {
        printf("%d %s %s %c %d %s\n", e->recID, e->fname, e->lname,
               e->sex, e->age, e->bp);
    }
} // print_entries
#endif
/* Write the match list to `filename` as CSV, one match per line: entry1's
 * fields (recID,fname,lname,sex,age,bp) followed by entry2's. Skips the
 * dummy head node. Returns 0 on success, -1 on any I/O error. */
int write_matches(char *filename, match_t *matches) {
    FILE *fp;
    if ((fp = fopen(filename, "w")) == NULL) return -1;
    while (matches->next) {
        matches = matches->next;
        fprintf(fp, "%d,%s,%s,%c,%d,%s,", matches->entry1->recID,
                matches->entry1->fname, matches->entry1->lname,
                matches->entry1->sex, matches->entry1->age,
                matches->entry1->bp);
        fprintf(fp, "%d,%s,%s,%c,%d,%s\n", matches->entry2->recID,
                matches->entry2->fname, matches->entry2->lname,
                matches->entry2->sex, matches->entry2->age,
                matches->entry2->bp);
    }
    // BUG FIX: fclose flushes buffered output on a write stream; the
    // original ignored its result and could silently drop the file tail.
    if (fclose(fp) == EOF) return -1;
    return 0;
} // write_matches
|
Example_critical.2.c | /*
* @@name: critical.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
#include <omp.h>
int dequeue(float *a);
void work(int i, float *a);
/* OpenMP named-critical example: the `xaxis` and `yaxis` critical regions
 * serialize access to each queue independently, so a thread dequeuing from
 * x never blocks one dequeuing from y. The contended lock hint tells the
 * runtime to optimize the lock for high contention. */
void critical_example(float *x, float *y)
{
    /* per-thread indices: each thread works on its own dequeued items */
    int ix_next, iy_next;
    #pragma omp parallel shared(x, y) private(ix_next, iy_next)
    {
        /* only the dequeue itself is inside the critical construct;
           work() below runs concurrently across threads */
        #pragma omp critical (xaxis) hint(omp_lock_hint_contended)
        ix_next = dequeue(x);
        work(ix_next, x);
        #pragma omp critical (yaxis) hint(omp_lock_hint_contended)
        iy_next = dequeue(y);
        work(iy_next, y);
    }
}
|
kmp_abt_atomic.c | /*
* kmp_atomic.c -- ATOMIC implementation routines
*/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
#include "kmp_abt_atomic.h"
#include "kmp_abt.h" // TRUE, asm routines prototypes
typedef unsigned char uchar;
typedef unsigned short ushort;
/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of atomic operations.
The compiler is at liberty to inline atomic operations that are naturally supported
by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
s++;
@endcode
using the single instruction: `lock; incl s`
However the runtime does provide entrypoints for these operations to support compilers that choose
not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the
increment above.)
The names of the functions are encoded by using the data type name and the operation name, as in these tables.
Data Type | Data type encoding
-----------|---------------
int8_t | `fixed1`
uint8_t | `fixed1u`
int16_t | `fixed2`
uint16_t | `fixed2u`
int32_t | `fixed4`
uint32_t | `fixed4u`
int32_t | `fixed8`
uint32_t | `fixed8u`
float | `float4`
double | `float8`
float 10 (8087 eighty bit float) | `float10`
complex<float> | `cmplx4`
complex<double> | `cmplx8`
complex<float10> | `cmplx10`
<br>
Operation | Operation encoding
----------|-------------------
+ | add
- | sub
\* | mul
/ | div
& | andb
<< | shl
\>\> | shr
\| | orb
^ | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv. | eqv
.neqv. | neqv
<br>
For non-commutative operations, `_rev` can also be added for the reversed operation.
For the functions that capture the result, the suffix `_cpt` is added.
Update Functions
================
The general form of an atomic function that just performs an update (without a `capture`)
@code
void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
`capture` functions
===================
The capture functions perform an atomic update and return a result, which is either the value
before the capture, or that after. They take an additional argument to determine which result is returned.
Their general form is therefore
@code
TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
@param flag one if the result is to be captured *after* the operation, zero if captured *before*.
The one set of exceptions to this is the `complex<float>` type where the value is not returned,
rather an extra argument pointer is passed.
They look like
@code
void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
@endcode
Read and Write Operations
=========================
The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the
value is read or written atomically, with no modification
performed. In many cases on IA-32 architecture these operations can be inlined since
the architecture guarantees that no tearing occurs on aligned objects
accessed with a single memory operation of up to 64 bits in size.
The general form of the read operations is
@code
TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc );
@endcode
For the write operations the form is
@code
void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
Full list of functions
======================
This leads to the generation of 376 atomic functions, as follows.
Functons for integers
---------------------
There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters).
@code
__kmpc_atomic_fixed1_add
__kmpc_atomic_fixed1_add_cpt
__kmpc_atomic_fixed1_add_fp
__kmpc_atomic_fixed1_andb
__kmpc_atomic_fixed1_andb_cpt
__kmpc_atomic_fixed1_andl
__kmpc_atomic_fixed1_andl_cpt
__kmpc_atomic_fixed1_div
__kmpc_atomic_fixed1_div_cpt
__kmpc_atomic_fixed1_div_cpt_rev
__kmpc_atomic_fixed1_div_float8
__kmpc_atomic_fixed1_div_fp
__kmpc_atomic_fixed1_div_rev
__kmpc_atomic_fixed1_eqv
__kmpc_atomic_fixed1_eqv_cpt
__kmpc_atomic_fixed1_max
__kmpc_atomic_fixed1_max_cpt
__kmpc_atomic_fixed1_min
__kmpc_atomic_fixed1_min_cpt
__kmpc_atomic_fixed1_mul
__kmpc_atomic_fixed1_mul_cpt
__kmpc_atomic_fixed1_mul_float8
__kmpc_atomic_fixed1_mul_fp
__kmpc_atomic_fixed1_neqv
__kmpc_atomic_fixed1_neqv_cpt
__kmpc_atomic_fixed1_orb
__kmpc_atomic_fixed1_orb_cpt
__kmpc_atomic_fixed1_orl
__kmpc_atomic_fixed1_orl_cpt
__kmpc_atomic_fixed1_rd
__kmpc_atomic_fixed1_shl
__kmpc_atomic_fixed1_shl_cpt
__kmpc_atomic_fixed1_shl_cpt_rev
__kmpc_atomic_fixed1_shl_rev
__kmpc_atomic_fixed1_shr
__kmpc_atomic_fixed1_shr_cpt
__kmpc_atomic_fixed1_shr_cpt_rev
__kmpc_atomic_fixed1_shr_rev
__kmpc_atomic_fixed1_sub
__kmpc_atomic_fixed1_sub_cpt
__kmpc_atomic_fixed1_sub_cpt_rev
__kmpc_atomic_fixed1_sub_fp
__kmpc_atomic_fixed1_sub_rev
__kmpc_atomic_fixed1_swp
__kmpc_atomic_fixed1_wr
__kmpc_atomic_fixed1_xor
__kmpc_atomic_fixed1_xor_cpt
__kmpc_atomic_fixed1u_div
__kmpc_atomic_fixed1u_div_cpt
__kmpc_atomic_fixed1u_div_cpt_rev
__kmpc_atomic_fixed1u_div_fp
__kmpc_atomic_fixed1u_div_rev
__kmpc_atomic_fixed1u_shr
__kmpc_atomic_fixed1u_shr_cpt
__kmpc_atomic_fixed1u_shr_cpt_rev
__kmpc_atomic_fixed1u_shr_rev
__kmpc_atomic_fixed2_add
__kmpc_atomic_fixed2_add_cpt
__kmpc_atomic_fixed2_add_fp
__kmpc_atomic_fixed2_andb
__kmpc_atomic_fixed2_andb_cpt
__kmpc_atomic_fixed2_andl
__kmpc_atomic_fixed2_andl_cpt
__kmpc_atomic_fixed2_div
__kmpc_atomic_fixed2_div_cpt
__kmpc_atomic_fixed2_div_cpt_rev
__kmpc_atomic_fixed2_div_float8
__kmpc_atomic_fixed2_div_fp
__kmpc_atomic_fixed2_div_rev
__kmpc_atomic_fixed2_eqv
__kmpc_atomic_fixed2_eqv_cpt
__kmpc_atomic_fixed2_max
__kmpc_atomic_fixed2_max_cpt
__kmpc_atomic_fixed2_min
__kmpc_atomic_fixed2_min_cpt
__kmpc_atomic_fixed2_mul
__kmpc_atomic_fixed2_mul_cpt
__kmpc_atomic_fixed2_mul_float8
__kmpc_atomic_fixed2_mul_fp
__kmpc_atomic_fixed2_neqv
__kmpc_atomic_fixed2_neqv_cpt
__kmpc_atomic_fixed2_orb
__kmpc_atomic_fixed2_orb_cpt
__kmpc_atomic_fixed2_orl
__kmpc_atomic_fixed2_orl_cpt
__kmpc_atomic_fixed2_rd
__kmpc_atomic_fixed2_shl
__kmpc_atomic_fixed2_shl_cpt
__kmpc_atomic_fixed2_shl_cpt_rev
__kmpc_atomic_fixed2_shl_rev
__kmpc_atomic_fixed2_shr
__kmpc_atomic_fixed2_shr_cpt
__kmpc_atomic_fixed2_shr_cpt_rev
__kmpc_atomic_fixed2_shr_rev
__kmpc_atomic_fixed2_sub
__kmpc_atomic_fixed2_sub_cpt
__kmpc_atomic_fixed2_sub_cpt_rev
__kmpc_atomic_fixed2_sub_fp
__kmpc_atomic_fixed2_sub_rev
__kmpc_atomic_fixed2_swp
__kmpc_atomic_fixed2_wr
__kmpc_atomic_fixed2_xor
__kmpc_atomic_fixed2_xor_cpt
__kmpc_atomic_fixed2u_div
__kmpc_atomic_fixed2u_div_cpt
__kmpc_atomic_fixed2u_div_cpt_rev
__kmpc_atomic_fixed2u_div_fp
__kmpc_atomic_fixed2u_div_rev
__kmpc_atomic_fixed2u_shr
__kmpc_atomic_fixed2u_shr_cpt
__kmpc_atomic_fixed2u_shr_cpt_rev
__kmpc_atomic_fixed2u_shr_rev
__kmpc_atomic_fixed4_add
__kmpc_atomic_fixed4_add_cpt
__kmpc_atomic_fixed4_add_fp
__kmpc_atomic_fixed4_andb
__kmpc_atomic_fixed4_andb_cpt
__kmpc_atomic_fixed4_andl
__kmpc_atomic_fixed4_andl_cpt
__kmpc_atomic_fixed4_div
__kmpc_atomic_fixed4_div_cpt
__kmpc_atomic_fixed4_div_cpt_rev
__kmpc_atomic_fixed4_div_float8
__kmpc_atomic_fixed4_div_fp
__kmpc_atomic_fixed4_div_rev
__kmpc_atomic_fixed4_eqv
__kmpc_atomic_fixed4_eqv_cpt
__kmpc_atomic_fixed4_max
__kmpc_atomic_fixed4_max_cpt
__kmpc_atomic_fixed4_min
__kmpc_atomic_fixed4_min_cpt
__kmpc_atomic_fixed4_mul
__kmpc_atomic_fixed4_mul_cpt
__kmpc_atomic_fixed4_mul_float8
__kmpc_atomic_fixed4_mul_fp
__kmpc_atomic_fixed4_neqv
__kmpc_atomic_fixed4_neqv_cpt
__kmpc_atomic_fixed4_orb
__kmpc_atomic_fixed4_orb_cpt
__kmpc_atomic_fixed4_orl
__kmpc_atomic_fixed4_orl_cpt
__kmpc_atomic_fixed4_rd
__kmpc_atomic_fixed4_shl
__kmpc_atomic_fixed4_shl_cpt
__kmpc_atomic_fixed4_shl_cpt_rev
__kmpc_atomic_fixed4_shl_rev
__kmpc_atomic_fixed4_shr
__kmpc_atomic_fixed4_shr_cpt
__kmpc_atomic_fixed4_shr_cpt_rev
__kmpc_atomic_fixed4_shr_rev
__kmpc_atomic_fixed4_sub
__kmpc_atomic_fixed4_sub_cpt
__kmpc_atomic_fixed4_sub_cpt_rev
__kmpc_atomic_fixed4_sub_fp
__kmpc_atomic_fixed4_sub_rev
__kmpc_atomic_fixed4_swp
__kmpc_atomic_fixed4_wr
__kmpc_atomic_fixed4_xor
__kmpc_atomic_fixed4_xor_cpt
__kmpc_atomic_fixed4u_div
__kmpc_atomic_fixed4u_div_cpt
__kmpc_atomic_fixed4u_div_cpt_rev
__kmpc_atomic_fixed4u_div_fp
__kmpc_atomic_fixed4u_div_rev
__kmpc_atomic_fixed4u_shr
__kmpc_atomic_fixed4u_shr_cpt
__kmpc_atomic_fixed4u_shr_cpt_rev
__kmpc_atomic_fixed4u_shr_rev
__kmpc_atomic_fixed8_add
__kmpc_atomic_fixed8_add_cpt
__kmpc_atomic_fixed8_add_fp
__kmpc_atomic_fixed8_andb
__kmpc_atomic_fixed8_andb_cpt
__kmpc_atomic_fixed8_andl
__kmpc_atomic_fixed8_andl_cpt
__kmpc_atomic_fixed8_div
__kmpc_atomic_fixed8_div_cpt
__kmpc_atomic_fixed8_div_cpt_rev
__kmpc_atomic_fixed8_div_float8
__kmpc_atomic_fixed8_div_fp
__kmpc_atomic_fixed8_div_rev
__kmpc_atomic_fixed8_eqv
__kmpc_atomic_fixed8_eqv_cpt
__kmpc_atomic_fixed8_max
__kmpc_atomic_fixed8_max_cpt
__kmpc_atomic_fixed8_min
__kmpc_atomic_fixed8_min_cpt
__kmpc_atomic_fixed8_mul
__kmpc_atomic_fixed8_mul_cpt
__kmpc_atomic_fixed8_mul_float8
__kmpc_atomic_fixed8_mul_fp
__kmpc_atomic_fixed8_neqv
__kmpc_atomic_fixed8_neqv_cpt
__kmpc_atomic_fixed8_orb
__kmpc_atomic_fixed8_orb_cpt
__kmpc_atomic_fixed8_orl
__kmpc_atomic_fixed8_orl_cpt
__kmpc_atomic_fixed8_rd
__kmpc_atomic_fixed8_shl
__kmpc_atomic_fixed8_shl_cpt
__kmpc_atomic_fixed8_shl_cpt_rev
__kmpc_atomic_fixed8_shl_rev
__kmpc_atomic_fixed8_shr
__kmpc_atomic_fixed8_shr_cpt
__kmpc_atomic_fixed8_shr_cpt_rev
__kmpc_atomic_fixed8_shr_rev
__kmpc_atomic_fixed8_sub
__kmpc_atomic_fixed8_sub_cpt
__kmpc_atomic_fixed8_sub_cpt_rev
__kmpc_atomic_fixed8_sub_fp
__kmpc_atomic_fixed8_sub_rev
__kmpc_atomic_fixed8_swp
__kmpc_atomic_fixed8_wr
__kmpc_atomic_fixed8_xor
__kmpc_atomic_fixed8_xor_cpt
__kmpc_atomic_fixed8u_div
__kmpc_atomic_fixed8u_div_cpt
__kmpc_atomic_fixed8u_div_cpt_rev
__kmpc_atomic_fixed8u_div_fp
__kmpc_atomic_fixed8u_div_rev
__kmpc_atomic_fixed8u_shr
__kmpc_atomic_fixed8u_shr_cpt
__kmpc_atomic_fixed8u_shr_cpt_rev
__kmpc_atomic_fixed8u_shr_rev
@endcode
Functions for floating point
----------------------------
There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes.
(Ten byte floats are used by X87, but are now rare).
@code
__kmpc_atomic_float4_add
__kmpc_atomic_float4_add_cpt
__kmpc_atomic_float4_add_float8
__kmpc_atomic_float4_add_fp
__kmpc_atomic_float4_div
__kmpc_atomic_float4_div_cpt
__kmpc_atomic_float4_div_cpt_rev
__kmpc_atomic_float4_div_float8
__kmpc_atomic_float4_div_fp
__kmpc_atomic_float4_div_rev
__kmpc_atomic_float4_max
__kmpc_atomic_float4_max_cpt
__kmpc_atomic_float4_min
__kmpc_atomic_float4_min_cpt
__kmpc_atomic_float4_mul
__kmpc_atomic_float4_mul_cpt
__kmpc_atomic_float4_mul_float8
__kmpc_atomic_float4_mul_fp
__kmpc_atomic_float4_rd
__kmpc_atomic_float4_sub
__kmpc_atomic_float4_sub_cpt
__kmpc_atomic_float4_sub_cpt_rev
__kmpc_atomic_float4_sub_float8
__kmpc_atomic_float4_sub_fp
__kmpc_atomic_float4_sub_rev
__kmpc_atomic_float4_swp
__kmpc_atomic_float4_wr
__kmpc_atomic_float8_add
__kmpc_atomic_float8_add_cpt
__kmpc_atomic_float8_add_fp
__kmpc_atomic_float8_div
__kmpc_atomic_float8_div_cpt
__kmpc_atomic_float8_div_cpt_rev
__kmpc_atomic_float8_div_fp
__kmpc_atomic_float8_div_rev
__kmpc_atomic_float8_max
__kmpc_atomic_float8_max_cpt
__kmpc_atomic_float8_min
__kmpc_atomic_float8_min_cpt
__kmpc_atomic_float8_mul
__kmpc_atomic_float8_mul_cpt
__kmpc_atomic_float8_mul_fp
__kmpc_atomic_float8_rd
__kmpc_atomic_float8_sub
__kmpc_atomic_float8_sub_cpt
__kmpc_atomic_float8_sub_cpt_rev
__kmpc_atomic_float8_sub_fp
__kmpc_atomic_float8_sub_rev
__kmpc_atomic_float8_swp
__kmpc_atomic_float8_wr
__kmpc_atomic_float10_add
__kmpc_atomic_float10_add_cpt
__kmpc_atomic_float10_add_fp
__kmpc_atomic_float10_div
__kmpc_atomic_float10_div_cpt
__kmpc_atomic_float10_div_cpt_rev
__kmpc_atomic_float10_div_fp
__kmpc_atomic_float10_div_rev
__kmpc_atomic_float10_mul
__kmpc_atomic_float10_mul_cpt
__kmpc_atomic_float10_mul_fp
__kmpc_atomic_float10_rd
__kmpc_atomic_float10_sub
__kmpc_atomic_float10_sub_cpt
__kmpc_atomic_float10_sub_cpt_rev
__kmpc_atomic_float10_sub_fp
__kmpc_atomic_float10_sub_rev
__kmpc_atomic_float10_swp
__kmpc_atomic_float10_wr
__kmpc_atomic_float16_add
__kmpc_atomic_float16_add_cpt
__kmpc_atomic_float16_div
__kmpc_atomic_float16_div_cpt
__kmpc_atomic_float16_div_cpt_rev
__kmpc_atomic_float16_div_rev
__kmpc_atomic_float16_max
__kmpc_atomic_float16_max_cpt
__kmpc_atomic_float16_min
__kmpc_atomic_float16_min_cpt
__kmpc_atomic_float16_mul
__kmpc_atomic_float16_mul_cpt
__kmpc_atomic_float16_rd
__kmpc_atomic_float16_sub
__kmpc_atomic_float16_sub_cpt
__kmpc_atomic_float16_sub_cpt_rev
__kmpc_atomic_float16_sub_rev
__kmpc_atomic_float16_swp
__kmpc_atomic_float16_wr
@endcode
Functions for Complex types
---------------------------
Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes.
The names here are based on the size of the component float, *not* the size of the complex type. So
`__kmpc_atomc_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`.
@code
__kmpc_atomic_cmplx4_add
__kmpc_atomic_cmplx4_add_cmplx8
__kmpc_atomic_cmplx4_add_cpt
__kmpc_atomic_cmplx4_div
__kmpc_atomic_cmplx4_div_cmplx8
__kmpc_atomic_cmplx4_div_cpt
__kmpc_atomic_cmplx4_div_cpt_rev
__kmpc_atomic_cmplx4_div_rev
__kmpc_atomic_cmplx4_mul
__kmpc_atomic_cmplx4_mul_cmplx8
__kmpc_atomic_cmplx4_mul_cpt
__kmpc_atomic_cmplx4_rd
__kmpc_atomic_cmplx4_sub
__kmpc_atomic_cmplx4_sub_cmplx8
__kmpc_atomic_cmplx4_sub_cpt
__kmpc_atomic_cmplx4_sub_cpt_rev
__kmpc_atomic_cmplx4_sub_rev
__kmpc_atomic_cmplx4_swp
__kmpc_atomic_cmplx4_wr
__kmpc_atomic_cmplx8_add
__kmpc_atomic_cmplx8_add_cpt
__kmpc_atomic_cmplx8_div
__kmpc_atomic_cmplx8_div_cpt
__kmpc_atomic_cmplx8_div_cpt_rev
__kmpc_atomic_cmplx8_div_rev
__kmpc_atomic_cmplx8_mul
__kmpc_atomic_cmplx8_mul_cpt
__kmpc_atomic_cmplx8_rd
__kmpc_atomic_cmplx8_sub
__kmpc_atomic_cmplx8_sub_cpt
__kmpc_atomic_cmplx8_sub_cpt_rev
__kmpc_atomic_cmplx8_sub_rev
__kmpc_atomic_cmplx8_swp
__kmpc_atomic_cmplx8_wr
__kmpc_atomic_cmplx10_add
__kmpc_atomic_cmplx10_add_cpt
__kmpc_atomic_cmplx10_div
__kmpc_atomic_cmplx10_div_cpt
__kmpc_atomic_cmplx10_div_cpt_rev
__kmpc_atomic_cmplx10_div_rev
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_mul_cpt
__kmpc_atomic_cmplx10_rd
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_cmplx10_sub_cpt
__kmpc_atomic_cmplx10_sub_cpt_rev
__kmpc_atomic_cmplx10_sub_rev
__kmpc_atomic_cmplx10_swp
__kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add
__kmpc_atomic_cmplx16_add_cpt
__kmpc_atomic_cmplx16_div
__kmpc_atomic_cmplx16_div_cpt
__kmpc_atomic_cmplx16_div_cpt_rev
__kmpc_atomic_cmplx16_div_rev
__kmpc_atomic_cmplx16_mul
__kmpc_atomic_cmplx16_mul_cpt
__kmpc_atomic_cmplx16_rd
__kmpc_atomic_cmplx16_sub
__kmpc_atomic_cmplx16_sub_cpt
__kmpc_atomic_cmplx16_sub_cpt_rev
__kmpc_atomic_cmplx16_swp
__kmpc_atomic_cmplx16_wr
@endcode
*/
/*!
@ingroup ATOMIC_OPS
@{
*/
/*
* Global vars
*/
#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1; // Intel perf
#else
int __kmp_atomic_mode = 2; // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */
KMP_ALIGN(128)
kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */
kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */
kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */
kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/
kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/
kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */
/*
2007-03-02:
Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a
bug on *_32 and *_32e. This is just a temporary workaround for the problem.
It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG
routines in assembler language.
*/
#define KMP_ATOMIC_VOLATILE volatile
#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD
/* Operator overloads for the aligned _Quad and 128-bit complex wrapper
 * structs: each forwards the operation to the underlying .q member so the
 * generic atomic macros can apply +=, -=, *=, /=, <, > uniformly.
 * NOTE(review): operator overloading means this translation unit must be
 * compiled as C++ despite the .c extension -- confirm the build does so.
 * The trailing semicolons after the function bodies are redundant. */
static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }
static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }
static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };
#endif
/* ------------------------------------------------------------------------ */
/* ATOMIC implementation routines */
/* one routine for each operation and operand type */
/* ------------------------------------------------------------------------ */
// All routines declarations looks like
// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
// ------------------------------------------------------------------------
#define KMP_CHECK_GTID \
if ( gtid == KMP_GTID_UNKNOWN ) { \
gtid = __kmp_entry_gtid(); \
} // check and get gtid when needed
// Beginning of a definition (provides name, parameters, gebug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
#define ATOMIC_LOCK0 __kmp_atomic_lock // all types, for Gnu compat
#define ATOMIC_LOCK1i __kmp_atomic_lock_1i // char
#define ATOMIC_LOCK2i __kmp_atomic_lock_2i // short
#define ATOMIC_LOCK4i __kmp_atomic_lock_4i // long int
#define ATOMIC_LOCK4r __kmp_atomic_lock_4r // float
#define ATOMIC_LOCK8i __kmp_atomic_lock_8i // long long int
#define ATOMIC_LOCK8r __kmp_atomic_lock_8r // double
#define ATOMIC_LOCK8c __kmp_atomic_lock_8c // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c // _Quad complex
// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*lhs) OP (rhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section. On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange. Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1. If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not of of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.
//
#ifdef KMP_GOMP_COMPAT
# define OP_GOMP_CRITICAL(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL( OP, 0 ); \
return; \
}
# else
# define OP_GOMP_CRITICAL(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
#if KMP_MIC
# define KMP_DO_PAUSE _mm_delay_32( 1 )
#else
# define KMP_DO_PAUSE KMP_CPU_PAUSE()
#endif /* KMP_MIC */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
#define OP_CMPXCHG(TYPE,BITS,OP) \
{ \
TYPE old_value, new_value; \
old_value = *(TYPE volatile *)lhs; \
new_value = old_value OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_DO_PAUSE; \
\
old_value = *(TYPE volatile *)lhs; \
new_value = old_value OP rhs; \
} \
}
#if USE_CMPXCHG_FIX
// 2007-06-25:
// workaround for C78287 (complex(kind=4) data type)
// lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm)
// Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro.
// This is a problem of the compiler.
// Related tracker is C76005, targeted to 11.0.
// I verified the asm of the workaround.
#define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
{ \
struct _sss { \
TYPE cmp; \
kmp_int##BITS *vvv; \
}; \
struct _sss old_value, new_value; \
old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \
new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \
*old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
new_value.cmp = old_value.cmp OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
*VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \
{ \
KMP_DO_PAUSE; \
\
*old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
new_value.cmp = old_value.cmp OP rhs; \
} \
}
// end of the first part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
// On these architectures the body is emitted without checking lhs alignment;
// the non-x86 variants below guard the fast path with an alignment test.
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
}
// -------------------------------------------------------------------------
// Generic CAS-based update: OP##= is the text handed to the GOMP-compat
// critical path; OP_CMPXCHG does the lock-free retry loop.
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#endif
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// MASK is a low-address-bits mask (e.g. 3 for 4-byte, 7 for 8-byte operands);
// an lhs that fails the (lhs & 0xMASK) == 0 test falls back to the
// lock-based critical section identified by LCK_ID.
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
// end of the second part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// Routines for ATOMIC 4-byte operands addition and subtraction
// Each invocation below expands to one __kmpc_atomic_* entry point (named in
// the trailing comment). A nonzero GOMP_FLAG (last argument) enables the
// GOMP-compatibility path when __kmp_atomic_mode == 2 (cf. the
// GOMP_MIN_MAX_CRITSECT pattern further below).
ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add
ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub
ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add
ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub
// Routines for ATOMIC 8-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add
ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub
ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add
ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// MASK - used for alignment check
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// Note: fixed4/float4/float8 add and sub were already instantiated above;
// this list covers the remaining operators, plus float4/float8 mul and div.
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add
ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb
ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div
ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div
ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul
ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb
ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl
ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr
ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr
ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub
ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor
ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add
ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb
ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div
ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div
ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul
ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb
ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl
ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr
ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr
ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub
ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor
ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb
ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div
ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div
ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul
ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb
ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl
ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr
ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr
ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor
ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb
ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div
ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div
ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul
ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb
ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl
ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr
ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr
ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor
ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div
ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul
ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div
ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
/* ------------------------------------------------------------------------ */
/* Routines for C/C++ Reduction operators && and || */
/* ------------------------------------------------------------------------ */
// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
// The operator text "= *lhs OP" is passed to the helper macros so that they
// can emit a full assignment of the form *lhs = *lhs OP rhs.
// TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used
#define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
OP_CRITICAL( = *lhs OP, LCK_ID ) \
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// Logical &&/|| reduction entry points. The 1- and 2-byte variants use plain
// char/short; the stored value of *lhs OP rhs is the C logical result (0/1).
ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl
ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl
ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl
ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl
ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl
ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl
ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl
ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl
/* ------------------------------------------------------------------------- */
/* Routines for Fortran operators that matched no one in C: */
/* MAX, MIN, .EQV., .NEQV. */
/* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */
/* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */
/* ------------------------------------------------------------------------- */
// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
// Lock-based fallback: re-tests "*lhs OP rhs" inside the lock because another
// thread may have installed a better value between the caller's pre-check and
// lock acquisition.
#define MIN_MAX_CRITSECT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if ( *lhs OP rhs ) { /* still need actions? */ \
*lhs = rhs; \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compat path: when FLAG is set and __kmp_atomic_mode == 2, do the whole
// update under lock 0 and return from the enclosing entry point.
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \
if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT( OP, 0 ); \
return; \
}
#else
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// Lock-free MIN/MAX: the CAS loop exits either when "old_value OP rhs" is
// false (another thread already stored an equal-or-better value, so nothing
// to do) or when the compare-and-store of rhs succeeds. temp_val forces a
// single read of *lhs per iteration.
#define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value; \
temp_val = *lhs; \
old_value = temp_val; \
while ( old_value OP rhs && /* still need actions? */ \
! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
{ \
KMP_CPU_PAUSE(); \
temp_val = *lhs; \
old_value = temp_val; \
} \
}
// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// Both entry-point shapes first do an unlocked pre-check "*lhs OP rhs" and do
// nothing at all if the current value already wins. Note that
// GOMP_MIN_MAX_CRITSECT may return from the function early (GOMP-compat mode).
#define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { /* need actions? */ \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
MIN_MAX_CRITSECT(OP,LCK_ID) \
} \
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
} \
}
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \
} \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// MIN/MAX entry points. The OP column is the "still need action" test:
// '<' (current < candidate) implements max, '>' implements min.
MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max
MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min
MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max
MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min
MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max
MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min
MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max
MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min
MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max
MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min
MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min
#if KMP_HAVE_QUAD
// 16-byte quad-precision operands cannot use a compare-exchange; always lock.
MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max
MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16
MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16
#endif
#endif
// ------------------------------------------------------------------------
// Need separate macros for .EQV. because of the need of complement (~)
// OP ignored for critical sections, ^=~ used instead
// .EQV. is x = x ^ ~rhs: the CAS path is instantiated with OP "^~" below,
// while the critical/GOMP paths receive the literal text "^=~".
#define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \
}
// ------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
OP_CMPXCHG(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// .NEQV. is plain XOR, so it reuses ATOMIC_CMPXCHG; .EQV. needs the
// complementing ATOMIC_CMPX_EQV variant ("^~").
ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv
ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv
ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv
ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv
ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv
ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv
ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv
ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// These operands are too wide for a compare-exchange, so the update is
// always done under the ATOMIC_LOCK##LCK_ID lock (or the GOMP-compat path).
#define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add
ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub
ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add
ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub
ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul
ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16
ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16
ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16
ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16
#endif
#endif
// routines for complex types
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// kmp_cmplx32 is 8 bytes, so it can still go through the 64-bit CAS-based
// workaround; wider complex types below always use the critical section.
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div
// end of the workaround for C78287
#else
ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX
ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div
ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16
ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16
ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16
ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16
#endif
#endif
#if OMP_40_ENABLED
// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Reversed operand order vs. OP_CRITICAL: rhs OP *lhs, not *lhs OP rhs.
#define OP_CRITICAL_REV(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*lhs) = (rhs) OP (*lhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
#ifdef KMP_GOMP_COMPAT
// GOMP-compat path: when FLAG is set and __kmp_atomic_mode == 2, do the
// reversed update under lock 0 and return from the enclosing entry point.
#define OP_GOMP_CRITICAL_REV(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_REV( OP, 0 ); \
return; \
}
#else
#define OP_GOMP_CRITICAL_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Same CAS retry loop as OP_CMPXCHG, but computes "rhs OP old_value"
// (reversed operand order) for x = expr binop x.
#define OP_CMPXCHG_REV(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_DO_PAUSE; \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
} \
}
// -------------------------------------------------------------------------
// Entry-point shape for reversed operations; no alignment check is needed
// here because this whole section is x86/x86_64-only (see the enclosing #if).
#define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
OP_CMPXCHG_REV(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// Only non-commutative operators (sub, div, shl, shr) get _rev entries;
// add/mul/bitwise ops do not need a reversed form.
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev
ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev
ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev
ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev
ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev
ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev
ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev
ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev
ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev
ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev
ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev
ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev
ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev
ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev
ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev
ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev
ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev
ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev
ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev
ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev
ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev
ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev
ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev
ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev
ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev
ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev
ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev
ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Reversed-operand (x = rhs OP x) variant of ATOMIC_CRITICAL for wide types.
#define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
OP_CRITICAL_REV(OP,LCK_ID) \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
// As with the forward forms, only non-commutative sub and div need _rev.
ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev
ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev
ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev
ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev
#endif
#endif
// routines for complex types
ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev
ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev
ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev
ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev
ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev
ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev
ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev
ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev
#endif
#endif
#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
// End of OpenMP 4.0: x = expr binop x for non-commutative operations.
#endif //OMP_40_ENABLED
/* ------------------------------------------------------------------------ */
/* Routines for mixed types of LHS and RHS, when RHS is "larger" */
/* Note: in order to reduce the total number of types combinations */
/* it is supposed that compiler converts RHS to longest floating type,*/
/* that is _Quad, before call to any of these routines */
/* Conversion to _Quad will be done by the compiler during calculation, */
/* conversion back to TYPE - before the assignment, like: */
/* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */
/* Performance penalty expected because of SW emulation use */
/* ------------------------------------------------------------------------ */
// Entry-point opener for mixed-type routines: the RHS type id is appended to
// the function name (e.g. __kmpc_atomic_fixed1_mul_float8).
#define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid ));
// -------------------------------------------------------------------------
// Wide/FP mixed-type update under lock (no CAS possible).
#define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// RHS=float8
// Instantiations: integer/real lhs combined with a kmp_real64 rhs.
// Trailing comment on each line is the generated entry-point name.
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8
ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8
ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8
// RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them)
// Instantiations with a _Quad rhs ("fp" suffix); expect SW-emulation cost.
#if KMP_HAVE_QUAD
ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp
ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp
ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp
ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp
ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp
ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp
ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp
ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp
ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// Same shape as ATOMIC_CMPXCHG_MIX but routes through the workaround
// variant of the CAS loop.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#else
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#endif // USE_CMPXCHG_FIX
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// Aligned addresses take the CAS loop; unaligned fall back to a lock.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// kmp_cmplx32 lhs combined with a kmp_cmplx64 rhs (64-bit CAS on the lhs).
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8
// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
//////////////////////////////////////////////////////////////////////////////////////////////////////
// ------------------------------------------------------------------------
// Atomic READ routines
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Emit the header of an atomic-read routine; the read value is returned
// as RET_TYPE. Function body is completed by the macros invoked after it.
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation
// Atomically read *loc by doing a CAS of the current value with itself:
// the union reinterprets the FP bits as an integer of the same width so
// the integer CAS primitive can be used. `new_value` is declared by the
// caller macro (ATOMIC_CMPXCHG_READ).
#define OP_CMPXCHG_READ(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
union f_i_union { \
TYPE f_val; \
kmp_int##BITS i_val; \
}; \
union f_i_union old_value; \
temp_val = *loc; \
old_value.f_val = temp_val; \
old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
new_value = old_value.f_val; \
return new_value; \
}
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Read *loc into `new_value` (declared by the caller macro) under the
// LCK_ID atomic lock; OP is unused here but kept for signature symmetry.
#define OP_CRITICAL_READ(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
new_value = (*loc); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility path: when atomic mode 2 is active, perform the read
// under lock 0 and return immediately, bypassing the lock-free path.
#define OP_GOMP_CRITICAL_READ(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_READ( OP, 0 ); \
return new_value; \
}
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// Integer atomic read implemented as fetch-and-add of OP 0 (i.e. +0),
// which returns the current value without changing it.
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \
return new_value; \
}
// -------------------------------------------------------------------------
// FP atomic read implemented via the self-CAS trick above.
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
OP_CMPXCHG_READ(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Extended-type atomic read (long double, _Quad, complex): read the value
// under the LCK_ID lock and return it.
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \
return new_value; \
}
// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS )
// Windows cmplx4 workaround (CQ220361): instead of returning the value,
// write it through the extra `out` parameter.
#define OP_CRITICAL_READ_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*out) = (*loc); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP path of the workaround: same read-under-lock, no return value.
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_READ_WRK( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Header variant with the out-parameter prepended to the usual signature.
#define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \
}
#endif // KMP_OS_WINDOWS
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// Atomic read instantiations. TYPE_ID, OP_ID, TYPE, (BITS,) OP, GOMP_FLAG.
ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd
ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd
ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd
ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd
// !!! TODO: Remove lock operations for "char" since it can't be non-atomic
ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd
ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd
ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd
#endif // KMP_HAVE_QUAD
// Fix for CQ220361 on Windows* OS
#if ( KMP_OS_WINDOWS )
ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#else
ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#endif
ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd
ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd
ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd
#endif
#endif
// ------------------------------------------------------------------------
// Atomic WRITE routines
// ------------------------------------------------------------------------
// Atomic write via unconditional integer exchange (KMP_XCHG_FIXED*).
#define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
KMP_XCHG_FIXED##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Atomic write via unconditional FP exchange (KMP_XCHG_REAL*).
#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
KMP_XCHG_REAL##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Atomic write via CAS loop: retry storing rhs until the observed old
// value is successfully replaced. OP is unused (a write stores rhs
// directly) but kept for signature symmetry with the other CAS macros.
#define OP_CMPXCHG_WR(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
OP_CMPXCHG_WR(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Extended-type atomic write: assignment performed under the LCK_ID lock.
#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
// Atomic write instantiations. 64-bit writes on 32-bit x86 have no single
// xchg instruction, hence the CMPXCHG variants under KMP_ARCH_X86.
ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr
ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr
ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#else
ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#endif
ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#else
ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#endif
ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr
#endif
ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr
ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr
ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr
ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr
#endif
#endif
// ------------------------------------------------------------------------
// Atomic CAPTURE routines
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Emit the header of a capture routine. `flag` selects capture semantics:
// non-zero returns the NEW value (v = x op= rhs), zero returns the OLD
// value (v = x; x op= rhs).
#define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Capture under lock: apply OP (a compound assignment), then return the
// new value (flag != 0) or the value observed before the update (flag == 0).
// `new_value` is declared by the caller macro.
#define OP_CRITICAL_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) OP rhs; \
new_value = (*lhs); \
} else { \
new_value = (*lhs); \
(*lhs) OP rhs; \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility capture: take the lock-0 critical path (which
// returns) when atomic mode 2 is active.
#define OP_GOMP_CRITICAL_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT( OP##=, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Capture via CAS loop: recompute old/new on each failed CAS, then return
// new (flag != 0) or old (flag == 0).
#define OP_CMPXCHG_CPT(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = old_value OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = old_value OP rhs; \
} \
if( flag ) { \
return new_value; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
// General capture routine built on the CAS loop above.
#define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
// Integer add/sub capture via fetch-and-add; the fetched (old) value plus
// "OP rhs" reconstructs the new value when flag is set.
#define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE old_value, new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
if( flag ) { \
return old_value OP rhs; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
// 4/8-byte integer add/sub capture (fetch-and-add) and float add/sub
// capture (CAS loop).
ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt
ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt
ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt
ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt
ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// Capture instantiations for the remaining integer/real operators.
ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt
ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt
ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt
ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt
ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt
ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt
ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for C/C++ Reduction operators && and ||
// ------------------------------------------------------------------------
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Logical-op capture under lock. OP arrives as "= *lhs &&" or "= *lhs ||"
// (built by the caller), so "new_value OP rhs" expands to a full
// assignment. NOTE: unlike OP_CRITICAL_CPT, this variant does not store
// back to *lhs; the store is handled by the caller's CAS path.
#define OP_CRITICAL_L_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
new_value OP rhs; \
} else \
new_value = (*lhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility path for logical capture; returns the captured value.
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_L_CPT( OP, 0 ); \
return new_value; \
}
#else
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
// Need separate macros for &&, || because there is no combined assignment
// (no "&&=" in C); the GOMP path receives "= *lhs OP" to synthesize one.
#define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// Logical && / || capture instantiations.
ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt
ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt
ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt
ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt
ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt
ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt
ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt
ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt
// -------------------------------------------------------------------------
// Routines for Fortran operators that matched no one in C:
// MAX, MIN, .EQV., .NEQV.
// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt
// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt
// -------------------------------------------------------------------------
// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
// Min/max capture under lock: re-check the condition inside the lock (it
// may have changed since the caller's unlocked test), store rhs if it
// still wins, and return rhs or the displaced old value per `flag`.
// `old_value`/`new_value` are declared by the caller macro.
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if ( *lhs OP rhs ) { /* still need actions? */ \
old_value = *lhs; \
*lhs = rhs; \
if ( flag ) \
new_value = rhs; \
else \
new_value = old_value; \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value; \
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compatibility path for min/max capture (lock 0).
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \
if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT_CPT( OP, 0 ); \
}
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// Min/max capture via CAS loop: keep trying to install rhs while the
// condition (old_value OP rhs) still holds; exit early once another
// thread installs a value that makes the update unnecessary.
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
/*TYPE old_value; */ \
temp_val = *lhs; \
old_value = temp_val; \
while ( old_value OP rhs && /* still need actions? */ \
! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
{ \
KMP_CPU_PAUSE(); \
temp_val = *lhs; \
old_value = temp_val; \
} \
if( flag ) \
return rhs; \
else \
return old_value; \
}
// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// 1-byte, 2-byte operands - use critical section
// Unlocked pre-test first; if no update is needed, fall through and
// return the current *lhs (the inner macros return when they act).
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value, old_value; \
if ( *lhs OP rhs ) { /* need actions? */ \
GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
} \
return *lhs; \
}
// Lock-free variant of the same pre-test + act-or-fall-through pattern.
#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value, old_value; \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
} \
return *lhs; \
}
// Min/max capture instantiations. OP is the "need update" test: `<` for
// max (update when current < rhs), `>` for min.
MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt
MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16_cpt
#endif
#endif
// ------------------------------------------------------------------------
// GOMP-compatibility fallback for captured eqv: when the library runs in
// GOMP atomic mode 2, serialize on the global lock instead of cmpxchg.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Captured logical equivalence (x = ~(x ^ rhs)) built on the generic
// compare-and-exchange capture loop.
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// neqv is a plain captured xor; eqv uses the ^~ operator pair above.
ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Generates a captured atomic update guarded by the per-size lock; OP##=
// turns e.g. '+' into the compound assignment '+=' applied to *lhs.
#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \
}
// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Under the lock: flag != 0 captures the NEW value into *out, flag == 0
// captures the OLD value before applying the update.
#define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) OP rhs; \
(*out) = (*lhs); \
} else { \
(*out) = (*lhs); \
(*lhs) OP rhs; \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_WRK( OP##=, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Prologue for the void-returning workaround entry points: result goes to
// *out instead of the return value, plus the capture-selection 'flag'.
#define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \
}
// The end of workaround for cmplx4
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt
ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt
ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt
ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt
ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt
ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt
ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt
#if ( KMP_ARCH_X86 )
// _a16 variants use the 16-byte-aligned quad type (x86 only)
ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt
ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt
ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt
ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt
#endif
#endif
// routines for complex types
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt
ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt
ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt
ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt
ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt
#endif
#endif
#if OMP_40_ENABLED
// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Reversed operand order: the update is *lhs = rhs OP *lhs. 'flag' selects
// whether the new (flag != 0) or old value is returned to the caller.
#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
/*temp_val = (*lhs);*/\
(*lhs) = (rhs) OP (*lhs); \
new_value = (*lhs); \
} else { \
new_value = (*lhs);\
(*lhs) = (rhs) OP (*lhs); \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_REV( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Lock-free reversed capture: recompute new_value = rhs OP old and retry
// the CAS until the location is unchanged between read and store.
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
} \
if( flag ) { \
return new_value; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}
// Reversed-capture entry points for the non-commutative integer/float ops
// (sub, div, shl, shr); signed and unsigned fixed-point flavours.
ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Lock-guarded reversed capture for types too wide for cmpxchg.
#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
/*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\
OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_REV(OP,LCK_ID) \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev
#endif
#endif
// routines for complex types
// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Same capture selection as OP_CRITICAL_CPT_REV, but the captured value is
// written to *out and the routine returns void.
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) = (rhs) OP (*lhs); \
(*out) = (*lhs); \
} else { \
(*out) = (*lhs); \
(*lhs) = (rhs) OP (*lhs); \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
}
// The end of workaround for cmplx4
// !!! TODO: check if we need to return void for cmplx4 routines
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev
#endif
#endif
// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
// Prologue for swap entry points: returns the previous value of *lhs.
#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));
// Lock-guarded swap for types with no native exchange.
#define CRITICAL_SWP(LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
old_value = (*lhs); \
(*lhs) = rhs; \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return old_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP(FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
CRITICAL_SWP( 0 ); \
}
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */
// Hardware exchange for fixed-point operands.
#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \
return old_value; \
}
// ------------------------------------------------------------------------
// Hardware exchange for floating-point operands.
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \
return old_value; \
}
// ------------------------------------------------------------------------
// Swap implemented as a compare-and-store retry loop (used where a direct
// exchange instruction is unavailable, e.g. 8-byte on 32-bit x86).
#define CMPXCHG_SWP(TYPE,BITS) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
} \
return old_value; \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
CMPXCHG_SWP(TYPE,BITS) \
}
ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp
ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp
#if ( KMP_ARCH_X86 )
// 32-bit x86 has no 8-byte exchange instruction: fall back to cmpxchg8b loop
ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#else
ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#endif
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
CRITICAL_SWP(LCK_ID) \
}
// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Prologue for the void-returning swap: previous value is stored in *out.
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));
// Lock-guarded swap writing the old value through *out.
#define CRITICAL_SWP_WRK(LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
tmp = (*lhs); \
(*lhs) = (rhs); \
(*out) = tmp; \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP_WRK(FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
CRITICAL_SWP_WRK( 0 ); \
}
#else
#define GOMP_CRITICAL_SWP_WRK(FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
TYPE tmp; \
GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \
CRITICAL_SWP_WRK(LCK_ID) \
}
// The end of workaround for cmplx4
ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp
#endif
// cmplx4 routine to return void
ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp
//ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp
ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp
ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp
ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp
#endif
#endif
// End of OpenMP 4.0 Capture
#endif //OMP_40_ENABLED
#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
#undef OP_CRITICAL
/* ------------------------------------------------------------------------ */
/* Generic atomic routines */
/* ------------------------------------------------------------------------ */
// Generic 1-byte atomic update: applies the user callback f so that the
// net effect is *lhs = f(*lhs, rhs), atomically. Uses a compare-and-store
// retry loop when possible, otherwise serializes on __kmp_atomic_lock_1i
// (or the single GOMP-compat lock when __kmp_atomic_mode == 2).
void
__kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
FALSE /* must use lock */
#else
TRUE
#endif
)
{
// Lock-free path: recompute through f and retry until CAS succeeds.
kmp_int8 old_value, new_value;
old_value = *(kmp_int8 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs,
*(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int8 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
}
else {
//
// All 1-byte data is of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid );
}
}
// Generic 2-byte atomic update: applies the user callback f so that the
// net effect is *lhs = f(*lhs, rhs), atomically. Uses a compare-and-store
// retry loop when the location is safely accessible lock-free (see the
// preprocessor ladder), otherwise serializes on __kmp_atomic_lock_2i
// (or the single GOMP-compat lock when __kmp_atomic_mode == 2).
// @param id_ref source location info (unused here)
// @param gtid   global thread id, passed to the lock routines
// @param lhs    address of the 2-byte location to update
// @param rhs    address of the right-hand operand
// @param f      reduction callback invoked as f(result, a, b)
void
__kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
// Fix: every sibling routine (__kmpc_atomic_1/4/8/10/16/20/32) asserts
// serial initialization on entry; this one was missing the check.
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
FALSE /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
TRUE /* no alignment problems */
#else
! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */
#endif
)
{
// Lock-free path: recompute through f and retry until CAS succeeds.
kmp_int16 old_value, new_value;
old_value = *(kmp_int16 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs,
*(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int16 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
}
else {
//
// All 2-byte data is of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid );
}
}
// Generic 4-byte atomic update: *lhs = f(*lhs, rhs) atomically, via CAS
// retry loop or the 4-byte lock (GOMP lock in compat mode 2).
void
__kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
//
// FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints.
// Gomp compatibility is broken if this routine is called for floats.
//
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
TRUE /* no alignment problems */
#else
! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */
#endif
)
{
// Lock-free path: recompute through f and retry until CAS succeeds.
kmp_int32 old_value, new_value;
old_value = *(kmp_int32 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs,
*(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int32 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
}
else {
//
// Use __kmp_atomic_lock_4i for all 4-byte data,
// even if it isn't of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid );
}
}
// Generic 8-byte atomic update: *lhs = f(*lhs, rhs) atomically, via CAS
// retry loop or the 8-byte lock (GOMP lock in compat mode 2).
void
__kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
FALSE /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
TRUE /* no alignment problems */
#else
! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */
#endif
)
{
// Lock-free path: recompute through f and retry until CAS succeeds.
kmp_int64 old_value, new_value;
old_value = *(kmp_int64 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs,
*(kmp_int64 *) &old_value,
*(kmp_int64 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int64 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
} else {
//
// Use __kmp_atomic_lock_8i for all 8-byte data,
// even if it isn't of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid );
}
}
// Generic 10-byte (long double) atomic update: always lock-based, using
// __kmp_atomic_lock_10r (GOMP lock in compat mode 2).
void
__kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid );
}
// Generic 16-byte atomic update: always lock-based, using
// __kmp_atomic_lock_16c (GOMP lock in compat mode 2).
void
__kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid );
}
// Generic 20-byte atomic update: always lock-based, using
// __kmp_atomic_lock_20c (GOMP lock in compat mode 2).
void
__kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid );
}
// Generic 32-byte atomic update: always lock-based, using
// __kmp_atomic_lock_32c (GOMP lock in compat mode 2).
void
__kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid );
}
// Compiler-visible counterparts of GOMP_atomic_start/GOMP_atomic_end: they
// bracket an atomic region by taking/dropping the global atomic lock. Kept
// as separate routines so pure Intel code avoids 3rd-party names.
// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin.
void
__kmpc_atomic_start(void)
{
int const my_gtid = __kmp_entry_gtid();
KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", my_gtid));
__kmp_acquire_atomic_lock( &__kmp_atomic_lock, my_gtid );
}
// Release the global atomic lock taken by __kmpc_atomic_start().
void
__kmpc_atomic_end(void)
{
int const my_gtid = __kmp_get_gtid();
KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", my_gtid));
__kmp_release_atomic_lock( &__kmp_atomic_lock, my_gtid );
}
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
/*!
@}
*/
// end of file
|
dct_lee_cpu.h | /**
* @file dct_lee_cpu.h
* @author Yibo Lin
* @date Oct 2018
*/
#ifndef DREAMPLACE_DCT_LEE_CPU_H
#define DREAMPLACE_DCT_LEE_CPU_H
#include <vector>
#include <cmath>
#include <stdexcept>
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
namespace lee
{
// Pi to double precision; used below to build the DCT/IDCT cosine tables.
constexpr double PI = 3.14159265358979323846;
/// Return true iff @p val is a positive power of 2 (exactly one bit set).
/// Zero yields false.
template <typename T = unsigned>
inline bool isPowerOf2(T val)
{
    if (val == 0)
    {
        return false;
    }
    return (val & (val - 1)) == 0;
}
/// Transpose a row-major matrix with M rows and N columns into `out`
/// (which becomes N x M, row-major), processing square tiles of side
/// `blockSize` for cache friendliness.
template <typename TValue, typename TIndex = unsigned>
inline void transpose(const TValue *in, TValue *out, TIndex M, TIndex N, TIndex blockSize = 16)
{
    //#pragma omp parallel for collapse(2) schedule(static)
    for (TIndex jb = 0; jb < N; jb += blockSize)
    {
        for (TIndex ib = 0; ib < M; ib += blockSize)
        {
            // Transpose the tile whose top-left corner is [ib, jb]
            const TIndex rowEnd = std::min(M, ib + blockSize);
            const TIndex colEnd = std::min(N, jb + blockSize);
            for (TIndex c = jb; c < colEnd; ++c)
            {
                for (TIndex r = ib; r < rowEnd; ++r)
                {
                    out[r + c * M] = in[c + r * N];
                }
            }
        }
    }
}
/// Flip the sign of every odd-indexed entry of `vec` (length N).
template <typename TValue, typename TIndex = unsigned>
inline void negateOddEntries(TValue *vec, TIndex N)
{
    for (TIndex idx = 1; idx < N; idx += 2)
    {
        vec[idx] = -vec[idx];
    }
}
/// Precompute the N - 1 scaled secant factors used by the iterative DCT.
/// Stage with half-length h = N/2, N/4, ..., 1 contributes h values of
/// 0.5 / cos((i + 0.5) * PI / (2 * h)), packed consecutively into 'cos'.
/// @param cos size N - 1 buffer, contains the result after function call
/// @param N the length of target dct, must be power of 2
template <typename TValue, typename TIndex = unsigned>
void precompute_dct_cos(TValue *cos, TIndex N)
{
    // The input length must be a power of 2.
    if (! isPowerOf2<TIndex>(N))
    {
        throw std::domain_error("Input length is not power of 2.");
    }
    TIndex written = 0;
    for (TIndex half = N / 2; half > 0; half /= 2)
    {
        const TValue step = 0.5 * PI / half;
        TValue angle = 0.5 * step;
        for (TIndex i = 0; i < half; ++i)
        {
            cos[written + i] = 0.5 / std::cos(angle);
            angle += step;
        }
        written += half;
    }
}
/// Precompute the N - 1 scaled secant factors used by the iterative IDCT.
/// Stages run with growing half-length h = 1, 2, ..., N/2; each contributes
/// h values of 0.5 / cos((i + 0.5) * PI / (2 * h)), packed consecutively.
/// @param cos size N - 1 buffer, contains the result after function call
/// @param N the length of target idct, must be power of 2
template <typename TValue, typename TIndex = unsigned>
void precompute_idct_cos(TValue *cos, TIndex N)
{
    // The input length must be a power of 2.
    if (! isPowerOf2<TIndex>(N))
    {
        throw std::domain_error("Input length is not power of 2.");
    }
    TIndex written = 0;
    for (TIndex half = 1; half < N; half *= 2)
    {
        const TValue step = 0.5 * PI / half;
        TValue angle = 0.5 * step;
        for (TIndex i = 0; i < half; ++i)
        {
            cos[written + i] = 0.5 / std::cos(angle);
            angle += step;
        }
        written += half;
    }
}
/// The implementation of fast Discrete Cosine Transform (DCT) algorithm and its inverse (IDCT) are Lee's algorithms
/// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984
///
/// Lee's algorithm has a recursive structure in nature.
/// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms
///
/// My implementation here is iterative, which is more efficient than the recursive version.
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT
/// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1
///
/// @param vec length N sequence to be transformed
/// @param temp length 2 * N helping buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos'
/// @param N length of vec, must be power of 2
/// Iterative Lee DCT of a length-N (power of 2) sequence.
/// 'out' and 'buf' are ping-pong buffers of length N; the result always ends
/// up in 'out'.  'cos' holds the N - 1 factors from precompute_dct_cos.
template <typename TValue, typename TIndex = unsigned>
inline void dct(TValue *vec, TValue *out, TValue *buf, const TValue *cos, TIndex N)
{
// The input length must be power of 2
if (! isPowerOf2<TIndex>(N))
{
throw std::domain_error("Input length is not power of 2.");
}
// Pointers point to the beginning indices of two adjacent iterations
TValue *curr = out;
TValue *next = buf;
// 'curr'/'next' ping-pong between 'out' and 'buf' across iterations.
// Copy 'vec' into the current working buffer.
std::copy(vec, vec + N, curr);
// Current butterfly length and half length
TIndex len = N;
TIndex halfLen = len / 2;
// Top-down pass: iteratively bi-partition each sequence into a "sum" half
// and a cosine-weighted "difference" half (the Lee decomposition).
TIndex cosOffset = 0;
while (halfLen)
{
TIndex offset = 0;
TIndex steps = N / len;
for (TIndex k = 0; k < steps; ++k)
{
for (TIndex i = 0; i < halfLen; ++i)
{
next[offset + i] = curr[offset + i] + curr[offset + len - i - 1];
next[offset + halfLen + i] = (curr[offset + i] - curr[offset + len -i - 1]) * cos[cosOffset + i];
}
offset += len;
}
std::swap(curr, next);
cosOffset += halfLen;
len = halfLen;
halfLen /= 2;
}
// Bottom-up pass: interleave the half-size results into the final DCT.
// Note that the case len = 2 will do nothing, so we start from len = 4
len = 4;
halfLen = 2;
while(halfLen < N)
{
TIndex offset = 0;
TIndex steps = N / len;
for(TIndex k = 0; k < steps; ++k)
{
for(TIndex i = 0; i < halfLen - 1; ++i)
{
next[offset + i * 2] = curr[offset + i];
next[offset + i * 2 + 1] = curr[offset + halfLen + i] + curr[offset + halfLen + i + 1];
}
// Last pair of the sub-sequence has no right neighbour to add.
next[offset + len - 2] = curr[offset + halfLen - 1];
next[offset + len - 1] = curr[offset + len - 1];
offset += len;
}
std::swap(curr, next);
halfLen = len;
len *= 2;
}
// After an odd number of swaps the result sits in 'buf'; copy it back.
if (curr != out)
{
std::copy(curr, curr+N, out);
}
}
/// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1
/// @param vec length N sequence to be transformed
/// @param temp length 2 * N helping buffer
/// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos'
/// @param N length of vec, must be power of 2
/// Iterative Lee IDCT of a length-N (power of 2) sequence.
/// 'out' and 'buf' are ping-pong buffers of length N; the result always ends
/// up in 'out'.  'cos' holds the N - 1 factors from precompute_idct_cos.
template <typename TValue, typename TIndex = unsigned>
inline void idct(TValue *vec, TValue *out, TValue* buf, const TValue *cos, TIndex N)
{
// The input length must be power of 2
if (! isPowerOf2<TIndex>(N))
{
throw std::domain_error("Input length is not power of 2.");
}
// Pointers point to the beginning indices of two adjacent iterations
TValue *curr = out;
TValue *next = buf;
// 'curr'/'next' ping-pong between 'out' and 'buf' across iterations.
// Copy 'vec' into the current working buffer.
std::copy(vec, vec + N, curr);
// Halve the DC term, per the IDCT definition (0.5 * x[0] contribution).
curr[0] /= 2;
// Current butterfly length and half length
TIndex len = N;
TIndex halfLen = len / 2;
// Top-down pass: de-interleave even-indexed terms into the first half and
// sums of adjacent odd-indexed terms into the second half.
while (halfLen)
{
TIndex offset = 0;
TIndex steps = N / len;
for (TIndex k = 0; k < steps; ++k)
{
next[offset] = curr[offset];
next[offset + halfLen] = curr[offset + 1];
for (TIndex i = 1; i < halfLen; ++i)
{
next[offset + i] = curr[offset + i * 2];
next[offset + halfLen + i] = curr[offset + i * 2 - 1] + curr[offset + i * 2 + 1];
}
offset += len;
}
std::swap(curr, next);
len = halfLen;
halfLen /= 2;
}
// Bottom-up pass: butterfly-combine the halves with the cosine weights.
len = 2;
halfLen = 1;
TIndex cosOffset = 0;
while(halfLen < N)
{
TIndex offset = 0;
TIndex steps = N / len;
for(TIndex k = 0; k < steps; ++k)
{
for(TIndex i = 0; i < halfLen; ++i)
{
TValue g = curr[offset + i];
TValue h = curr[offset + halfLen + i] * cos[cosOffset + i];
next[offset + i] = g + h;
next[offset + len - 1 - i] = g - h;
}
offset += len;
}
std::swap(curr, next);
cosOffset += halfLen;
halfLen = len;
len *= 2;
}
// After an odd number of swaps the result sits in 'buf'; copy it back.
if (curr != out)
{
std::copy(curr, curr+N, out);
}
}
/// Row-wise DCT of an M x N row-major matrix: each of the M rows is
/// transformed independently by the 1-D N-point DCT.
/// @param mtx input matrix (row-major, M x N)
/// @param out output matrix, same layout
/// @param buf scratch buffer of at least M * N elements
/// @param cos length N - 1 factors from 'precompute_dct_cos'
/// @param M number of rows
/// @param N number of columns, must be power of 2
template <typename TValue, typename TIndex = unsigned>
inline void dct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N)
{
    //#pragma omp parallel for schedule(static)
    for (TIndex row = 0; row < M; ++row)
    {
        const TIndex base = row * N;
        dct<TValue, TIndex>(mtx + base, out + base, buf + base, cos, N);
    }
}
/// Row-wise IDCT of an M x N row-major matrix: each of the M rows is
/// transformed independently by the 1-D N-point IDCT.
/// @param mtx input matrix (row-major, M x N)
/// @param out output matrix, same layout
/// @param buf scratch buffer of at least M * N elements
/// @param cos length N - 1 factors from 'precompute_idct_cos'
/// @param M number of rows
/// @param N number of columns, must be power of 2
template <typename TValue, typename TIndex = unsigned>
inline void idct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N)
{
    //#pragma omp parallel for schedule(static)
    for (TIndex row = 0; row < M; ++row)
    {
        const TIndex base = row * N;
        idct<TValue, TIndex>(mtx + base, out + base, buf + base, cos, N);
    }
}
} // End of namespace lee
DREAMPLACE_END_NAMESPACE
#endif
|
common.h | #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>
namespace LightGBM {
namespace Common {
// ASCII-only lowercase conversion: maps 'A'-'Z' to 'a'-'z' and leaves every
// other character untouched (deliberately locale-independent).
inline char tolower(char in) {
  return (in >= 'A' && in <= 'Z') ? static_cast<char>(in + ('a' - 'A')) : in;
}
// Strip leading and trailing ASCII whitespace in place; returns the same
// string reference for chaining.  An all-whitespace string becomes empty.
inline static std::string& Trim(std::string& str) {
  const char* kWhitespace = " \f\n\r\t\v";
  if (!str.empty()) {
    str.erase(str.find_last_not_of(kWhitespace) + 1);
    str.erase(0, str.find_first_not_of(kWhitespace));
  }
  return str;
}
// Strip any run of leading and trailing single or double quotes in place;
// returns the same string reference for chaining.
inline static std::string& RemoveQuotationSymbol(std::string& str) {
  const char* kQuotes = "'\"";
  if (!str.empty()) {
    str.erase(str.find_last_not_of(kQuotes) + 1);
    str.erase(0, str.find_first_not_of(kQuotes));
  }
  return str;
}
// Return true iff `str` begins with `prefix`.
// Improved: the old version took `prefix` by value (a copy per call) and
// allocated a temporary via substr(); compare() does neither.
inline static bool StartsWith(const std::string& str, const std::string& prefix) {
  return str.compare(0, prefix.size(), prefix) == 0;
}
// Split a C string on a single delimiter character; empty tokens (from
// consecutive delimiters or leading/trailing ones) are dropped.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> tokens;
  std::string str(c_str);
  size_t begin = 0;
  for (size_t cur = 0; cur < str.length(); ++cur) {
    if (str[cur] == delimiter) {
      if (begin < cur) {
        tokens.push_back(str.substr(begin, cur - begin));
      }
      begin = cur + 1;
    }
  }
  if (begin < str.length()) {
    tokens.push_back(str.substr(begin));
  }
  return tokens;
}
// Split a buffer into lines, treating any run of '\n'/'\r' characters as a
// single separator; empty lines are therefore dropped.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> lines;
  std::string str(c_str);
  size_t begin = 0;
  size_t cur = 0;
  while (cur < str.length()) {
    if (str[cur] == '\n' || str[cur] == '\r') {
      if (begin < cur) {
        lines.push_back(str.substr(begin, cur - begin));
      }
      // Swallow the whole run of line-ending characters (handles CRLF).
      while (str[cur] == '\n' || str[cur] == '\r') ++cur;
      begin = cur;
    } else {
      ++cur;
    }
  }
  if (begin < cur) {
    lines.push_back(str.substr(begin));
  }
  return lines;
}
// Split on any character appearing in the NUL-terminated `delimiters` set;
// empty tokens are dropped.
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> tokens;
  std::string str(c_str);
  size_t begin = 0;
  for (size_t cur = 0; cur < str.length(); ++cur) {
    bool is_delim = false;
    for (const char* d = delimiters; *d != '\0'; ++d) {
      if (str[cur] == *d) {
        is_delim = true;
        break;
      }
    }
    if (is_delim) {
      if (begin < cur) {
        tokens.push_back(str.substr(begin, cur - begin));
      }
      begin = cur + 1;
    }
  }
  if (begin < str.length()) {
    tokens.push_back(str.substr(begin));
  }
  return tokens;
}
// Return the first line containing `key_word` as a substring, or "" if none.
inline static std::string FindFromLines(const std::vector<std::string>& lines, const char* key_word) {
  for (const auto& line : lines) {
    if (line.find(key_word) != std::string::npos) {
      return line;
    }
  }
  return "";
}
// Parse an optionally signed decimal integer, skipping spaces on both sides.
// Returns a pointer to the first unconsumed character.  No overflow check —
// the caller is expected to supply in-range input.
inline static const char* Atoi(const char* p, int* out) {
  while (*p == ' ') ++p;
  int sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  int value = 0;
  while (*p >= '0' && *p <= '9') {
    value = value * 10 + (*p - '0');
    ++p;
  }
  *out = sign * value;
  while (*p == ' ') ++p;
  return p;
}
// Hand-rolled double parser: handles [sign] digits [. digits] [e/E exp],
// plus the textual tokens "na"/"nan" (-> NAN) and "inf"/"infinity"
// (-> +/-1e308, NOT IEEE infinity).  The exponent is clamped to 308, so very
// large exponents saturate rather than overflow.  Returns a pointer to the
// first unconsumed character; unknown tokens abort via Log::Fatal.
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = 0;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double pow10 = 10.0;
++p;
while (*p >= '0' && *p <= '9') {
value += (*p - '0') / pow10;
pow10 *= 10.0;
++p;
}
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
// Clamp to the largest double-representable decimal exponent.
if (expon > 308) expon = 308;
// Calculate scaling factor in big steps to limit rounding error.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
// Not numeric: collect the token up to a field separator and try to
// interpret it as a NaN/inf spelling (case-insensitive).
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
// Parse an int with Atoi and require that the entire string was consumed.
inline bool AtoiAndCheck(const char* p, int* out) {
  return *Atoi(p, out) == '\0';
}
// Parse a double with Atof and require that the entire string was consumed.
inline bool AtofAndCheck(const char* p, double* out) {
  return *Atof(p, out) == '\0';
}
// Advance past spaces and tabs; returns the first other character.
inline static const char* SkipSpaceAndTab(const char* p) {
  for (; *p == ' ' || *p == '\t'; ++p) {
  }
  return p;
}
// Advance past newline, carriage-return and space characters.
inline static const char* SkipReturn(const char* p) {
  for (; *p == '\n' || *p == '\r' || *p == ' '; ++p) {
  }
  return p;
}
// Element-wise static_cast of a vector<T> into a new vector<T2>.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> casted;
  casted.reserve(arr.size());
  for (const T& v : arr) {
    casted.push_back(static_cast<T2>(v));
  }
  return casted;
}
// Join all elements with `delimiter`, formatting floating-point values with
// enough digits to round-trip a double.
template<typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, char delimiter) {
  if (arr.empty()) {
    return std::string("");
  }
  std::stringstream out;
  out << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  out << arr[0];
  for (size_t i = 1; i < arr.size(); ++i) {
    out << delimiter << arr[i];
  }
  return out.str();
}
// Join at most the first n elements with `delimiter`, formatting
// floating-point values with enough digits to round-trip a double.
template<typename T>
inline static std::string ArrayToString(const std::vector<T>& arr, size_t n, char delimiter) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  std::stringstream out;
  out << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  out << arr[0];
  const size_t limit = std::min(n, arr.size());
  for (size_t i = 1; i < limit; ++i) {
    out << delimiter << arr[i];
  }
  return out.str();
}
// Functor converting a string to T.  The primary template handles integral T
// via std::stoll; the partial specialization below handles floating-point T
// via std::stod.  Callers select the branch with std::is_floating_point.
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
return static_cast<T>(std::stoll(str));
}
};
// Floating-point specialization (is_float == true).
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter, size_t n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), delimiter);
if (strs.size() != n) {
Log::Fatal("StringToArray error, size doesn't match.");
}
std::vector<T> ret(n);
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (size_t i = 0; i < n; ++i) {
ret[i] = helper(strs[i]);
}
return ret;
}
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
// Join all elements with the C-string `delimiter`, formatting floating-point
// values with enough digits to round-trip a double.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream out;
  out << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  out << strs[0];
  for (size_t i = 1; i < strs.size(); ++i) {
    out << delimiter << strs[i];
  }
  return out.str();
}
// Join strs[start, end) with the C-string `delimiter`.
// Fixed: the old guard `end - start <= 0` was evaluated on size_t, so it only
// caught end == start.  With end < start the subtraction wrapped, and with an
// empty vector `strs.size() - 1` wrapped as well, leading to an out-of-bounds
// read of strs[start].  Both cases now return "".
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  if (strs.empty() || end <= start) {
    return std::string("");
  }
  start = std::min(start, strs.size() - 1);
  end = std::min(end, strs.size());
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}
// Round x up to the nearest power of two (x <= 1 yields 1); returns 0 when
// the result would not fit in a positive int64_t.
// Fixed: the old loop left-shifted a signed int64_t up to 64 times, which is
// signed-overflow undefined behavior once t passed 2^62.  The accumulator is
// now unsigned (well-defined wraparound) and the overflow case is detected
// explicitly.
static inline int64_t Pow2RoundUp(int64_t x) {
  if (x <= 1) {
    return 1;
  }
  uint64_t t = 1;
  while (t < static_cast<uint64_t>(x)) {
    t <<= 1;  // unsigned shift: wraps instead of invoking UB
    if (t == 0) {
      return 0;  // x exceeded 2^63
    }
  }
  return t > static_cast<uint64_t>(INT64_MAX) ? 0 : static_cast<int64_t>(t);
}
/*!
 * \brief In-place softmax: rec[i] <- exp(rec[i] - max) / sum(exp(. - max)).
 *        Subtracting the maximum first keeps exp() from overflowing.
 * \param p_rec The input/output vector of the values (must be non-empty).
 */
inline void Softmax(std::vector<double>* p_rec) {
  std::vector<double>& rec = *p_rec;
  double max_val = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    max_val = std::max(rec[i], max_val);
  }
  double norm = 0.0;
  for (auto& v : rec) {
    v = std::exp(v - max_val);
    norm += v;
  }
  for (auto& v : rec) {
    v /= norm;
  }
}
// Softmax over a raw array: output[i] = exp(input[i] - max) / sum, with the
// max subtracted first so exp() cannot overflow.  len must be >= 1.
inline void Softmax(const double* input, double* output, int len) {
  double max_val = input[0];
  for (int i = 1; i < len; ++i) {
    max_val = std::max(input[i], max_val);
  }
  double norm = 0.0;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - max_val);
    norm += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= norm;
  }
}
// Collect raw const pointers from a vector of unique_ptr.  No ownership is
// transferred; the unique_ptrs must outlive the returned vector.
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> raw;
  raw.reserve(input.size());
  for (const auto& owned : input) {
    raw.push_back(owned.get());
  }
  return raw;
}
// Sort keys[start..keys.size()) (ascending by default, descending when
// is_reverse) and permute values[start..) identically so key/value pairs
// stay aligned.
// Fixed: the write-back loop indexed the temporary with arr[i] instead of
// arr[i - start]; the temporary only holds keys.size() - start entries, so
// for start > 0 the first sorted entries were dropped and the tail of the
// output was never written.  Behavior for start == 0 is unchanged.
template<typename T1, typename T2>
inline void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
  if (start >= keys.size()) {
    return;  // nothing to sort (also guards the reserve() below)
  }
  std::vector<std::pair<T1, T2>> arr;
  arr.reserve(keys.size() - start);
  for (size_t i = start; i < keys.size(); ++i) {
    arr.emplace_back(keys[i], values[i]);
  }
  if (!is_reverse) {
    std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  for (size_t i = start; i < keys.size(); ++i) {
    keys[i] = arr[i - start].first;
    values[i] = arr[i - start].second;
  }
}
/*
 * approximate hessians of absolute loss with Gaussian function
 * cf. https://en.wikipedia.org/wiki/Gaussian_function
 *
 * y is a prediction.
 * t means true target.
 * g means gradient.
 * eta is a parameter to control the width of Gaussian function.
 * w means weights.
 */
inline static double ApproximateHessianWithGaussian(const double y, const double t, const double g,
const double eta, const double w=1.0f) {
const double diff = y - t;
const double pi = 4.0 * std::atan(1.0);
// Gaussian is centered at b = 0 and evaluated at |y - t|.
const double x = std::fabs(diff);
const double a = 2.0 * std::fabs(g) * w; // difference of two first derivatives, (zero to inf) and (zero to -inf).
const double b = 0.0;
// Width scales with the magnitudes of y and t; floored to avoid division by ~0.
const double c = std::max((std::fabs(y) + std::fabs(t)) * eta, 1.0e-10);
return w * std::exp(-(x - b) * (x - b) / (2.0 * c * c)) * a / (c * std::sqrt(2 * pi));
}
// Collect the data() pointer of each inner vector.  The pointers alias the
// inner vectors' storage, so they are invalidated by any reallocation.
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) {
  std::vector<T*> ptrs;
  ptrs.reserve(data.size());
  for (auto& inner : data) {
    ptrs.push_back(inner.data());
  }
  return ptrs;
}
// Return the size of each inner vector, truncated to int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> sizes;
  sizes.reserve(data.size());
  for (const auto& inner : data) {
    sizes.push_back(static_cast<int>(inner.size()));
  }
  return sizes;
}
// Clamp x into [-1e300, 1e300] so downstream arithmetic never sees +/-inf.
// NaN passes through unchanged (both comparisons are false).
inline static double AvoidInf(double x) {
  if (x >= 1e300) return 1e300;
  if (x <= -1e300) return -1e300;
  return x;
}
// Yield a null pointer typed "pointer to the iterator's value_type".  Used
// only to communicate the element type to ParallelSort through overload
// resolution; the pointer value is never dereferenced.
template<class _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return (0);
}
// OpenMP parallel sort: split [_First, _Last) into per-thread chunks, sort
// each chunk independently, then pairwise-merge chunks doubling the run
// length until the whole range is sorted.  Falls back to std::sort for small
// inputs or a single thread.  _VTRanIt* carries the value type only (see
// IteratorValType).
template<class _RanIt, class _Pr, class _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
// Small input or no parallelism: a serial sort is cheaper.
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
// Chunk size per thread, floored at kMinInnerLen; recompute the number of
// chunks actually needed.
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
// NOTE(review): assigning vector<>::iterator to _RanIt assumes _RanIt is a
// raw pointer or a compatible iterator type — confirm against call sites.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Bottom-up merge: each round merges pairs of sorted runs of length s.
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
// Copy the left run aside, then merge back into place.
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
// Convenience overload: derive the value type from the iterator and forward
// to the four-argument ParallelSort.
template<class _RanIt, class _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
// Check that all y[] are in interval [ymin, ymax] (end points included);
// aborts via Log::Fatal (with the caller's name and the offending element)
// on the first violation.
inline void check_elements_interval_closed(const float *y, float ymin, float ymax, int ny, const char *callername) {
for (int i = 0; i < ny; ++i) {
if (y[i] < ymin || y[i] > ymax) {
Log::Fatal("[%s]: does not tolerate element [#%i = %f] outside [%f, %f]", callername, i, y[i], ymin, ymax);
}
}
}
// One pass over w[0..nw): compute min, max and (double-precision) sum.
// Any of the output pointers may be NULL to skip that statistic.
// Requires nw >= 1 (w[0] seeds all three accumulators).
inline void obtain_min_max_sum(const float *w, int nw, float *mi, float *ma, double *su) {
  float lo = w[0];
  float hi = w[0];
  double total = static_cast<double>(w[0]);
  for (int i = 1; i < nw; ++i) {
    total += w[i];
    lo = std::min(lo, w[i]);
    hi = std::max(hi, w[i]);
  }
  if (mi != nullptr) *mi = lo;
  if (ma != nullptr) *ma = hi;
  if (su != nullptr) *su = total;
}
// Pack the integer values vals[0..n) into a bitset (32 bits per word),
// growing the result so the highest value fits.
template<class T>
inline std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> bits;
  for (int i = 0; i < n; ++i) {
    const int word = vals[i] / 32;
    const int bit = vals[i] % 32;
    if (word + 1 > static_cast<int>(bits.size())) {
      bits.resize(word + 1, 0);
    }
    bits[word] |= (1U << bit);
  }
  return bits;
}
// Test bit `pos` in a bitset of n 32-bit words; positions beyond the stored
// words report false.
template<class T>
inline bool FindInBitset(const uint32_t* bits, int n, T pos) {
  const int word = pos / 32;
  if (word >= n) {
    return false;
  }
  return ((bits[word] >> (pos % 32)) & 1) != 0;
}
} // namespace Common
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
spherepix.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <complex.h>
#include <math.h>
#include "fastsphere.h"
#include "config.h"
#include "init.h"
#include "util.h"
#include "scatmat.h"
#include "farfield.h"
#include "spreflect.h"
#ifdef DOUBLEPREC
typedef double real;
typedef complex double cplx;
#else
typedef float real;
typedef complex float cplx;
#endif
/* Print a usage summary for this tool to stderr and terminate the process
 * with a failure status.  Never returns. */
void usage (char *name) {
fprintf (stderr, "USAGE: %s [-h] [-e] [-m mx [my mz [Mx My Mz]]] [-n Nx [Ny Nz]] [input [output]]\n", name);
fprintf (stderr, "\t-h: Print this message and exit\n");
fprintf (stderr, "\t-e: Specify the existence of an enclosing sphere in the input file\n");
fprintf (stderr, "\t-m: Specify the lower and upper box corner in wavelengths\n\t\t(default: tight)\n");
fprintf (stderr, "\t-n: Specify the number of pixels in each dimension (default: 100)\n");
fprintf (stderr, "\tInput file name may be '-' or omitted for stdin\n");
fprintf (stderr, "\tOutput file name may be '-' or omitted for stdout\n");
exit (EXIT_FAILURE);
}
/* Determine if a point is inside a sphere. */
int insphere (double *pt, double *cen, double r) {
double dist, dx[3];
dx[0] = pt[0] - cen[0];
dx[1] = pt[1] - cen[1];
dx[2] = pt[2] - cen[2];
/* Find the distance between the point and the center. */
dist = sqrt (dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]);
/* Return whether or not the point is in the sphere. */
return (dist <= r);
}
/* Compute the Laplacian of the inverse of the square root of density. The
 * density is already rooted. Watch for edges of the domain.
 *
 * r, lr and nr hold the rooted density of the current, previous (lower z)
 * and next (higher z) slabs on an nelt[0] x nelt[1] grid; c[0..2] are the
 * cell spacings.  A NULL lr/nr, or an x/y index at the domain edge, is
 * treated as unit density (1/r == 1.0).  pos is the linear pixel index. */
real lapden (real *r, real *lr, real *nr, double *c, int pos, int *nelt) {
real dlap, nval, pval;
int x, y;
/* Recover the 2-D pixel coordinates from the linear index. */
x = pos % nelt[0];
y = pos / nelt[0];
/* Contribution of the x offsets with bounds checking. */
if (x >= nelt[0] - 1) nval = 1.0;
else nval = 1.0 / r[pos + 1];
if (x <= 0) pval = 1.0;
else pval = 1.0 / r[pos - 1];
dlap = (pval + nval - 2.0 / r[pos]) / (c[0] * c[0]);
/* Contribution of the y offsets with bounds checking. */
if (y >= nelt[1] - 1) nval = 1.0;
else nval = 1.0 / r[pos + nelt[0]];
if (y <= 0) pval = 1.0;
else pval = 1.0 / r[pos - nelt[0]];
dlap += (pval + nval - 2.0 / r[pos]) / (c[1] * c[1]);
/* Contribution of the z offsets with bounds checking. */
if (!nr) nval = 1.0;
else nval = 1.0 / nr[pos];
if (!lr) pval = 1.0;
else pval = 1.0 / lr[pos];
dlap += (pval + nval - 2.0 / r[pos]) / (c[2] * c[2]);
return dlap;
}
/* Augment the contrast slab k (nelt[0] * nelt[1] pixels) with the density
 * correction term derived from the rooted-density slabs r (current),
 * lr (previous, may be NULL) and nr (next, may be NULL).  Returns the
 * number of pixels processed. */
int augct (cplx *k, real *r, real *lr, real *nr, int *nelt, double *cell) {
int i, npx = nelt[0] * nelt[1];
real dval;
#pragma omp parallel for default(shared) private(i,dval)
for (i = 0; i < npx; ++i) {
/* Compute the Laplacian of the inverse square root of the density. */
dval = lapden (r, lr, nr, cell, i, nelt);
/* Scale by density square root and normalize by wave number. */
dval *= r[i] / (4.0 * M_PI * M_PI);
/* Subtract the density term from the contrast. */
k[i] -= dval;
}
return npx;
}
/* Build the contrast and density maps for a slab.
 *
 * Fills ct and density (each nelt[0] * nelt[1] pixels) for the z-slab with
 * index zidx of a grid whose lower corner is blim[0..2] and whose cell size
 * is cell[0..2].  Pixels default to the background (wave number 2*PI,
 * density 1); the optional enclosing sphere bgs (centered at the origin)
 * and then the individual spheres in slist override that, with later
 * spheres in the list taking precedence.  The wave number is converted to
 * a contrast k^2/(2*PI)^2 - 1, and density is stored as its square root.
 * Returns the number of pixels processed. */
int bldct (cplx *ct, real *density, int *nelt, double *blim,
double *cell, sptype *bgs, spscat *slist, int nsphere, int zidx) {
double zero[3] = {0, 0, 0};
int ntot = nelt[0] * nelt[1];
#pragma omp parallel default(shared)
{
double cen[3];
int i, j, idx[2];
cplx ctval;
real dval;
/* z-coordinate of every cell center in this slab. */
cen[2] = blim[2] + ((double)zidx + 0.5) * cell[2];
/* Build the density-free contrast and the density map. */
#pragma omp for
for (i = 0; i < ntot; ++i) {
/* Find the cell index. */
idx[0] = i % nelt[0];
idx[1] = i / nelt[0];
/* Find the cell center. */
cen[0] = blim[0] + ((double)idx[0] + 0.5) * cell[0];
cen[1] = blim[1] + ((double)idx[1] + 0.5) * cell[1];
/* Set the background contrast. */
ctval = 2 * M_PI;
dval = 1.0;
/* Check if the point is in an enclosing sphere, if it exists. */
if (bgs && insphere (cen, zero, bgs->r)) {
ctval = (cplx)(bgs->k);
dval = (real)(bgs->rho);
}
/* If the point is in an inner sphere, set the wave number. */
for (j = 0; j < nsphere; ++j)
if (insphere (cen, slist[j].cen, slist[j].spdesc->r)) {
ctval = (cplx)(slist[j].spdesc->k);
dval = (real)(slist[j].spdesc->rho);
}
/* Convert the wave number to the contrast. */
ctval /= (2 * M_PI);
ctval = ctval * ctval - 1;
/* Set the contrast value and density in the grid. */
ct[i] = ctval;
density[i] = sqrt(dval);
}
}
return ntot;
}
/* Read a sphere configuration, rasterize the complex contrast and density
 * maps on a regular grid one z-slab at a time, and stream the contrast grid
 * (header: three ints nelt[], then nelt[2] slabs of npx cplx values) to the
 * output file.
 *
 * Fixes over the previous revision:
 *   - With -e and automatic box sizing, the UPPER box corner was set to
 *     -bgspt.r (a copy-paste of the lower corner), producing an inverted,
 *     zero-volume box; it is now +bgspt.r.
 *   - getopt() returns int; storing it in a plain char broke the loop-exit
 *     comparison with -1 on platforms where char is unsigned.
 *   - Removed the unused local 'n' and the redundant NULL guards before
 *     free() (free(NULL) is a no-op). */
int main (int argc, char **argv) {
	int nspheres, nsptype, i, npx, ndig;
	int autobox = 1, nelt[3] = {100, 100, 100};
	int ch;
	double boxlim[6], cell[3];
	cplx *k, *nk, *kslab;
	real *density, *lr, *r, *nr;
	FILE *fptr = NULL;
	char *progname;
	sptype *sparms, *bgptr = NULL, bgspt;
	spscat *slist;
	bgtype bg;
	exctparm exct;
	itconf itc;

	/* Store the name used to invoke the program. */
	progname = argv[0];

	while ((ch = getopt (argc, argv, "hem:n:")) != -1) {
		switch (ch) {
		case 'e':
			/* The input file describes an enclosing sphere. */
			bgptr = &bgspt;
			break;
		case 'm':
			/* Specify the box limits. */
			autobox = sscanf (optarg, "%lf %lf %lf %lf %lf %lf",
					boxlim, boxlim + 1, boxlim + 2,
					boxlim + 3, boxlim + 4, boxlim + 5);
			switch (autobox) {
			case 1:
				/* Set symmetric bounds from one dimension. */
				boxlim[0] = boxlim[1] = boxlim[2] = -ABS(boxlim[0]);
				boxlim[3] = boxlim[4] = boxlim[5] = ABS(boxlim[0]);
				break;
			case 3:
				/* Set symmetric bounds from one corner. */
				boxlim[0] = -(boxlim[3] = ABS(boxlim[0]));
				boxlim[1] = -(boxlim[4] = ABS(boxlim[1]));
				boxlim[2] = -(boxlim[5] = ABS(boxlim[2]));
				break;
			case 6:
				/* Nothing to be done for a fully specified box. */
				break;
			default:
				usage (progname);
			}
			/* Don't automatically specify limits. */
			autobox = 0;
			break;
		case 'n':
			i = sscanf (optarg, "%d %d %d", nelt, nelt + 1, nelt + 2);
			if (i == 1) nelt[1] = nelt[2] = nelt[0];
			else if (i != 3) usage (progname);
			break;
		case 'h': default:
			usage (progname);
		}
	}

	/* Point argv to the input and output specifications. */
	argc -= optind;
	argv += optind;

	if (argc < 1 || !strcmp("-", argv[0])) fptr = stdin;
	else fptr = critopen (argv[0], "r");

	readcfg (fptr, &nspheres, &nsptype, &sparms, bgptr, &slist, &bg, &exct, &itc, &ndig);
	fprintf (stderr, "Parsed configuration for %d spheres at %g MHz\n", nspheres, exct.f / 1e6);
	fclose (fptr);

	/* Automatically set box dimensions if necessary. */
	if (autobox && bgptr) {
		/* FIX: the upper corner must be +r; it used to be -r. */
		boxlim[0] = boxlim[1] = boxlim[2] = -bgspt.r;
		boxlim[3] = boxlim[4] = boxlim[5] = bgspt.r;
	} else if (autobox) {
		/* Set the initial bounds to enclose the first sphere. */
		boxlim[0] = slist->cen[0] - slist->spdesc->r;
		boxlim[1] = slist->cen[1] - slist->spdesc->r;
		boxlim[2] = slist->cen[2] - slist->spdesc->r;
		boxlim[3] = slist->cen[0] + slist->spdesc->r;
		boxlim[4] = slist->cen[1] + slist->spdesc->r;
		boxlim[5] = slist->cen[2] + slist->spdesc->r;
		/* Grow the box to enclose every remaining sphere. */
		for (i = 1; i < nspheres; ++i) {
			boxlim[0] = MIN(boxlim[0], slist[i].cen[0] - slist[i].spdesc->r);
			boxlim[1] = MIN(boxlim[1], slist[i].cen[1] - slist[i].spdesc->r);
			boxlim[2] = MIN(boxlim[2], slist[i].cen[2] - slist[i].spdesc->r);
			boxlim[3] = MAX(boxlim[3], slist[i].cen[0] + slist[i].spdesc->r);
			boxlim[4] = MAX(boxlim[4], slist[i].cen[1] + slist[i].spdesc->r);
			boxlim[5] = MAX(boxlim[5], slist[i].cen[2] + slist[i].spdesc->r);
		}
	}

	/* Compute the cell dimensions. */
	cell[0] = (boxlim[3] - boxlim[0]) / nelt[0];
	cell[1] = (boxlim[4] - boxlim[1]) / nelt[1];
	cell[2] = (boxlim[5] - boxlim[2]) / nelt[2];

	npx = nelt[0] * nelt[1];

	/* Allocate two contrast slabs and three density slabs (previous,
	 * current and next, for the z Laplacian in augct). */
	kslab = malloc (2 * npx * sizeof(cplx));
	density = malloc (3 * npx * sizeof(real));
	if (!kslab || !density) {
		fprintf (stderr, "Failed to allocate slab buffers.\n");
		return EXIT_FAILURE;
	}

	/* Point to the slab data stores. */
	k = kslab;
	nk = k + npx;
	lr = NULL;
	r = density;
	nr = r + npx;

	if (argc < 2 || !strcmp("-", argv[1])) fptr = stdout;
	else fptr = critopen (argv[1], "w");

	fprintf (stderr, "Writing contrast file.\n");

	/* Write the header. */
	fwrite (nelt, sizeof(int), 3, fptr);

	/* Construct the first slab of data. */
	bldct (k, r, nelt, boxlim, cell, bgptr, slist, nspheres, 0);

	for (i = 1; i < nelt[2]; ++i) {
		/* Construct the next slab of data. */
		bldct (nk, nr, nelt, boxlim, cell, bgptr, slist, nspheres, i);
		/* Build and write the previous slab. */
		augct (k, r, lr, nr, nelt, cell);
		fwrite (k, sizeof(cplx), npx, fptr);
		/* Rotate the slab pointers (2-deep for contrast, 3-deep
		 * for density). */
		k = kslab + (i % 2) * npx;
		nk = kslab + ((i + 1) % 2) * npx;
		lr = density + ((i - 1) % 3) * npx;
		r = density + (i % 3) * npx;
		nr = density + ((i + 1) % 3) * npx;
	}

	/* Build and write the last slab (no next slab: nr is NULL). */
	augct (k, r, lr, NULL, nelt, cell);
	fwrite (k, sizeof(cplx), npx, fptr);

	fclose (fptr);

	clrspheres (sparms, nsptype);
	free (exct.pwmag);
	free (exct.theta);
	free (exct.psmag);
	free (exct.psloc);
	free (kslab);
	free (density);

	return EXIT_SUCCESS;
}
|
GB_unop__asinh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asinh_fp32_fp32
// op(A') function: GB_unop_tran__asinh_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = asinhf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = asinhf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = asinhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASINH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__asinh_fp32_fp32
(
    float *Cx,               // output array; Cx and Ax may be aliased
    const float *Ax,         // input array
    int64_t anz,             // number of entries to process
    int nthreads             // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; the caller falls back to
    // the generic (non-hard-coded) kernel
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = asinhf (Ax [k]) for all k; no typecast is needed since both
    // arrays are float
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = asinhf (Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply asinhf per entry.
// NOTE(review): the numerical work lives in the included template, which
// presumably consumes the GB_CAST_OP macro defined earlier in this file.
GrB_Info GB_unop_tran__asinh_fp32_fp32
(
GrB_Matrix C,                        // output matrix
const GrB_Matrix A,                  // input matrix, transposed into C
int64_t *GB_RESTRICT *Rowcounts,     // workspace: per-slice row counts
GBI_single_iterator Iter,            // iterator over A
const int64_t *GB_RESTRICT A_slice,  // partition of A for parallelism
int naslice                          // number of slices of A
)
{
#if GB_DISABLE
// operator disabled at compile time; caller uses the generic kernel instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bml_add_dense_typed.c | /* Needs to be included before #include <complex.h>. */
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif
#include "../../macros.h"
#include "../../typed.h"
#include "../blas.h"
#include "../bml_add.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_parallel.h"
#include "../bml_scale.h"
#include "../bml_types.h"
#include "bml_add_dense.h"
#include "bml_allocate_dense.h"
#include "bml_copy_dense.h"
#include "bml_scale_dense.h"
#include "bml_types_dense.h"
#include <complex.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Matrix addition.
*
* \f$ A = \alpha A + \beta B \f$
*
* \ingroup add_group
*
* \param A Matrix A
* \param B Matrix B
* \param alpha Scalar factor multiplied by A
* \param beta Scalar factor multiplied by B
*/
// A := alpha*A + beta*B on the locally-owned rows of the dense matrices.
// Expanded once per precision via TYPED_FUNC/REAL_T (typed.h).
void TYPED_FUNC(
bml_add_dense) (
bml_matrix_dense_t * A,
bml_matrix_dense_t * B,
double alpha,
double beta)
{
int myRank = bml_getMyRank();
// number of locally-owned elements (rows owned by this rank, full width)
int nElems = B->domain->localRowExtent[myRank] * B->N;
// flat offset of this rank's first owned element
int startIndex = B->domain->localDispl[myRank];
int inc = 1;
#ifdef BML_USE_MAGMA
// GPU path: operate on the whole N x ld storage rather than the local rows
nElems = B->N * B->ld;
MAGMA_T alpha_ = MAGMACOMPLEX(MAKE) (alpha, 0.);
MAGMA_T beta_ = MAGMACOMPLEX(MAKE) (beta, 0.);
MAGMA(scal) (nElems, alpha_, A->matrix, inc, bml_queue());
// NOTE(review): scal starts at A->matrix but axpy starts at
// A->matrix + startIndex while covering all nElems — looks inconsistent
// (and B->matrix has no offset); verify against the distributed layout.
MAGMA(axpy) (nElems, beta_, B->matrix, inc,
A->matrix + startIndex, inc, bml_queue());
#else
REAL_T alpha_ = alpha;
REAL_T beta_ = beta;
#ifdef NOBLAS
LOG_ERROR("No BLAS library");
#else
// A[start..] = alpha*A[start..] ; then A[start..] += beta*B[start..]
C_BLAS(SCAL) (&nElems, &alpha_, A->matrix + startIndex, &inc);
C_BLAS(AXPY) (&nElems, &beta_, B->matrix + startIndex, &inc,
A->matrix + startIndex, &inc);
#endif
#endif
}
/** Matrix addition and calculate TrNorm.
*
* \f$ A = \alpha A + \beta B \f$
*
* \ingroup add_group
*
* \param A Matrix A
* \param B Matrix B
* \param alpha Scalar factor multiplied by A
* \param beta Scalar factor multiplied by B
*/
// A := alpha*A + beta*B, returning the squared Frobenius-style norm of the
// locally-owned part of B (accumulated before the addition is applied).
double TYPED_FUNC(
bml_add_norm_dense) (
bml_matrix_dense_t * A,
bml_matrix_dense_t * B,
double alpha,
double beta)
{
double trnorm = 0.0;
REAL_T *B_matrix = (REAL_T *) B->matrix;
int myRank = bml_getMyRank();
int N = A->N;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
// Sum of squares over the rows owned by this rank only.
#pragma omp parallel for \
shared(B_matrix, A_localRowMin, A_localRowMax) \
shared(N, myRank) \
reduction(+:trnorm)
//for (int i = 0; i < N * N; i++)
for (int i = A_localRowMin[myRank] * N; i < A_localRowMax[myRank] * N;
i++)
{
// NOTE(review): for complex REAL_T this is z*z, not |z|^2, and is then
// truncated into a double accumulator — confirm intended for complex types.
trnorm += B_matrix[i] * B_matrix[i];
}
// Perform the actual A = alpha*A + beta*B update.
TYPED_FUNC(bml_add_dense) (A, B, alpha, beta);
return trnorm;
}
/** Matrix addition.
*
* \f$ A = A + \beta \mathrm{Id} \f$
*
* \ingroup add_group
*
* \param A Matrix A
* \param beta Scalar factor multiplied by I
*/
// A := A + beta*Id, adding beta to each locally-owned diagonal element.
void TYPED_FUNC(
bml_add_identity_dense) (
bml_matrix_dense_t * A,
double beta)
{
int N = A->N;
// NOTE(review): this uses "#if BML_USE_MAGMA" while the rest of the file
// uses "#ifdef BML_USE_MAGMA"; if the macro is defined but empty, this
// line fails to preprocess — confirm the build always defines it as 1.
#if BML_USE_MAGMA
MAGMA_T *A_matrix = (MAGMA_T *) A->matrix;
MAGMA_T beta_ = MAGMACOMPLEX(MAKE) (beta, 0.);
// Build an explicit identity matrix and add beta*B to A on the device.
bml_matrix_dense_t *B =
TYPED_FUNC(bml_identity_matrix_dense) (N, sequential);
MAGMABLAS(geadd) (N, N, beta_, (MAGMA_T *) B->matrix, B->ld,
A_matrix, A->ld, bml_queue());
bml_deallocate_dense(B);
#else
REAL_T *A_matrix = (REAL_T *) A->matrix;
REAL_T beta_ = beta;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
int myRank = bml_getMyRank();
// Shift only the diagonal entries of the rows owned by this rank.
#pragma omp parallel for \
shared(A_matrix, A_localRowMin, A_localRowMax) \
shared(N, myRank, beta_)
//for (int i = 0; i < N; i++)
for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
{
A_matrix[ROWMAJOR(i, i, N, N)] += beta_;
}
#endif
}
/** Matrix addition.
*
* \f$ A = alpha A + \beta \mathrm{Id} \f$
*
* \ingroup add_group
*
* \param A Matrix A
* \param alpha Scalar factor multiplied by A
* \param beta Scalar factor multiplied by I
*/
// A := alpha*A + beta*Id, implemented as an in-place scale followed by a
// diagonal shift.
void TYPED_FUNC(
bml_scale_add_identity_dense) (
bml_matrix_dense_t * A,
double alpha,
double beta)
{
// scale then shift the diagonal
REAL_T _alpha = (REAL_T) alpha;
// NOTE(review): these call the generic (non-TYPED_FUNC) entry points,
// unlike bml_add_norm_dense above which calls TYPED_FUNC(bml_add_dense);
// verify the untyped dispatchers were intended here.
bml_scale_inplace_dense(&_alpha, A);
bml_add_identity_dense(A, beta);
}
|
GB_binop__bor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int8)
// C=scalar+B GB (_bind1st__bor_int8)
// C=scalar+B' GB (_bind1st_tran__bor_int8)
// C=A+scalar GB (_bind2nd__bor_int8)
// C=A'+scalar GB (_bind2nd_tran__bor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator. The per-entry
// operation (cij = aij | bij) is supplied by the GB_BINOP macro defined
// above and consumed by the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_int8)
(
GrB_Matrix C,        // output: dense matrix
const GrB_Matrix A,  // first dense input
const GrB_Matrix B,  // second dense input
const int nthreads   // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// BOR operator as the accumulator.
GrB_Info GB (_Cdense_accumB__bor_int8)
(
GrB_Matrix C,        // dense matrix being accumulated into
const GrB_Matrix B,  // sparse matrix whose entries are folded into C
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the BOR operator.
GrB_Info GB (_Cdense_accumb__bor_int8)
(
    GrB_Matrix C,            // dense matrix being accumulated into
    const GB_void *p_bwork,  // pointer to the scalar b, already of type int8_t
    const int nthreads       // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; the generic kernel is used instead
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (an unreachable duplicate "return (GrB_SUCCESS) ;" that followed the
    // block above was removed; the block already returns on all paths)
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// C = D*B (rowscale) is not available for the BOR operator; this stub is
// compiled out. The placeholder name "(none)" matches the colscale stub
// above (it was previously misspelled "(node)").
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the BOR operator; the numerical work
// is in the included template, driven by the macros defined above.
GrB_Info GB (_AaddB__bor_int8)
(
GrB_Matrix C,                  // output matrix
const int C_sparsity,          // sparsity structure chosen for C
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // if true, use the mask structurally
const bool Mask_comp,          // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the BOR operator.
// (The header comment block above lists this function under the truncated
// name "GB (_AemultB)".)
GrB_Info GB (_AemultB_01__bor_int8)
(
GrB_Matrix C,                  // output matrix
const int C_sparsity,          // sparsity structure chosen for C
const int ewise_method,        // which eWiseMult variant was selected
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // if true, use the mask structurally
const bool Mask_comp,          // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. Since GB_BINOP_FLIP is 0 for BOR (commutative), only the
// unflipped template instantiation below is compiled.
GrB_Info GB (_AemultB_02__bor_int8)
(
GrB_Matrix C,              // output matrix
const GrB_Matrix M,        // optional mask (may be NULL)
const bool Mask_struct,    // if true, use the mask structurally
const bool Mask_comp,      // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,         // if true, compute f(y,x) instead of f(x,y)
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full.
GrB_Info GB (_AemultB_03__bor_int8)
(
GrB_Matrix C,             // output matrix
const GrB_Matrix M,       // sparse/hyper mask
const bool Mask_struct,   // if true, use the mask structurally
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__bor_int8)
(
GrB_Matrix C,             // output matrix, held in bitmap form
const int ewise_method,   // which eWiseMult variant was selected
const GrB_Matrix M,       // optional mask (may be NULL)
const bool Mask_struct,   // if true, use the mask structurally
const bool Mask_comp,     // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bor_int8)
(
    GB_void *Cx_output,          // Cx and Bx may be aliased
    const GB_void *x_input,      // scalar x, bound as the first operand
    const GB_void *Bx_input,     // values of B
    const int8_t *restrict Bb,   // bitmap of B, or NULL
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; defer to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t k ;
    // Cx [k] = x | Bx [k] for every entry present according to the bitmap Bb
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            Cx [k] = (x) | (Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bor_int8)
(
    GB_void *Cx_output,          // Cx and Ax may be aliased
    const GB_void *Ax_input,     // values of A
    const GB_void *y_input,      // scalar y, bound as the second operand
    const int8_t *restrict Ab,   // bitmap of A, or NULL
    int64_t anz,                 // number of entries
    int nthreads                 // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; defer to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t k ;
    // Cx [k] = Ax [k] | y for every entry present according to the bitmap Ab
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            Cx [k] = (Ax [k]) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply z = x | aij to each entry, using
// the GB_CAST_OP macro redefined just above this function.
GrB_Info GB (_bind1st_tran__bor_int8)
(
GrB_Matrix C,                        // output matrix
const GB_void *x_input,              // scalar x, bound as the first operand
const GrB_Matrix A,                  // input matrix, transposed into C
int64_t *restrict *Workspaces,       // workspace for the transpose
const int64_t *restrict A_slice,     // partition of A for parallelism
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of the file (here A and B have the
// same type, so the redefinition is a no-op in this instantiation)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply z = aij | y to each entry, using
// the GB_CAST_OP macro redefined just above this function.
GrB_Info GB (_bind2nd_tran__bor_int8)
(
GrB_Matrix C,                        // output matrix
const GrB_Matrix A,                  // input matrix, transposed into C
const GB_void *y_input,              // scalar y, bound as the second operand
int64_t *restrict *Workspaces,       // workspace for the transpose
const int64_t *restrict A_slice,     // partition of A for parallelism
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; the generic kernel is used instead
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_wtime.c | #include <omp.h>
#include <stdio.h>
int main()
{
    double t_begin, t_end;
    // Take two device-side timestamps; the two omp_get_wtime() calls must
    // not be folded together by the compiler.
    #pragma omp target map(from: t_begin, t_end)
    {
        t_begin = omp_get_wtime();
    #if defined(__AMDGCN__)
        // nvptx has a nanosleep, but only for >= sm_70
        __builtin_amdgcn_s_sleep(1000);
    #endif
        t_end = omp_get_wtime();
    }
    double elapsed = t_end - t_begin;
    if (elapsed != 0.)
    {
        printf("Success!\n");
    }
    else
    {
        printf("Failure\n");
    }
    return elapsed == 0.; // exit 0 (success) iff the two readings differ
}
|
DRACC_OMP_002_Buffer_Overflow_Tasking_yes.c | /*
Causing a buffer overflow on the accelerator with recursive tasks in line 17.
*/
#include <stdio.h>
#define C 1024
#pragma omp declare target
int counter = 0;
// Intentionally defective benchmark kernel (this file's purpose is to cause
// a buffer/overflow condition on the accelerator — see the file header):
// every loop iteration spawns a task that recurses with no base case, so
// task creation and the growth of `counter` are unbounded. Do not "fix".
int rekurs(){
for(int i = 0; i<C; i++){
// atomically bump the shared target-resident counter
#pragma omp atomic
counter++;
// unbounded recursion: each task spawns C more tasks
#pragma omp task
rekurs();
}
return counter;
}
#pragma omp end declare target
// Launch the defective kernel on device 0 and print the counter that was
// mapped back. NOTE(review): no taskwait/taskgroup surrounds rekurs(), so
// the value printed is whatever the runtime observed at region end — this
// nondeterminism is part of the benchmark's intent.
int main(){
#pragma omp target map(tofrom:counter) device(0)
rekurs();
printf("counter %i\n",counter);
return 0;
}
omp_init_lock.c | // RUN: %libomp-compile-and-run
// REQUIRES: dummy
#include "omp_testsuite.h"
#include <stdio.h>
// This should be slightly less than KMP_I_LOCK_CHUNK, which is 1024
#define LOCKS_PER_ITER 1000
#define ITERATIONS (REPETITIONS + 1)
// This tests concurrently using locks on one thread while initializing new
// ones on another thread. This exercises the global lock pool.
int test_omp_init_lock() {
  // Exercise the runtime's global lock pool: while some threads initialize
  // fresh locks, other threads concurrently set/unset locks that already
  // exist.
  omp_lock_t lcks[ITERATIONS * LOCKS_PER_ITER];
  int i;
#pragma omp parallel for schedule(static) num_threads(NUM_TASKS)
  for (i = 0; i < ITERATIONS; i++) {
    omp_lock_t *chunk = lcks + i * LOCKS_PER_ITER;
    // Create this iteration's batch of locks.
    for (int k = 0; k < LOCKS_PER_ITER; k++) {
      omp_init_lock(chunk + k);
    }
    // Hammer the batch with acquire/release cycles.
    for (int k = 0; k < LOCKS_PER_ITER * 100; k++) {
      omp_set_lock(chunk + (k % LOCKS_PER_ITER));
      omp_unset_lock(chunk + (k % LOCKS_PER_ITER));
    }
  }
  // Every iteration has finished; release all locks. The global pool being
  // exercised does not shrink when locks are destroyed.
  {
    for (int k = 0; k < ITERATIONS * LOCKS_PER_ITER; k++) {
      omp_destroy_lock(&lcks[k]);
    }
  }
  return 0;
}
// Run the lock-pool test once; its return value (0 on success) becomes the
// process exit status.
int main() {
// No use repeating this test, since it's exercising a private global pool
// which is not reset between test iterations.
return test_omp_init_lock();
}
|
DRB031-truedepfirstdimension-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
There is a loop-carried true dependence within the outer level loop.
Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
// DataRaceBench kernel DRB031: a loop-carried true dependence across the
// OUTER i loop (b[i][j] reads b[i-1][j-1]) — see the file header. This
// defect is the point of the benchmark; do not "fix" it.
int main(int argc,char *argv[])
{
int i;
int j;
int n = 1000;
int m = 1000;
// NOTE(review): ~8 MB automatic array; may require a raised stack limit.
double b[1000][1000];
// Initialization: every element set independently, safe to parallelize.
#pragma omp parallel for private (i,j)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 0; j <= m - 1; j += 1) {
b[i][j] = 0.5;
}
}
// The dependent update. NOTE(review): in this variant only the inner j
// loop carries the parallel pragma while the header documents the race on
// the outer loop's dependence — confirm against the upstream DRB031 file.
for (i = 1; i <= n - 1; i += 1) {
#pragma omp parallel for private (j)
for (j = 1; j <= m - 1; j += 1) {
b[i][j] = b[i - 1][j - 1];
}
}
printf("b[500][500]=%f\n",b[500][500]);
return 0;
}
|
GB_unaryop__minv_fp64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_int32
// op(A') function: GB_tran__minv_fp64_int32
// C type: double
// A type: int32_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp64_int32
(
    double *restrict Cx,        // output array
    const int32_t *restrict Ax, // input array
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; the caller falls back to
    // the generic (non-hard-coded) kernel
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = 1 / (double) Ax [k]; this is the GB_CAST_OP macro written
    // out explicitly (cast to double, then multiplicative inverse)
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        double xk = (double) Ax [k] ;
        Cx [k] = 1./xk ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int32_t to double, and apply
// the multiplicative-inverse operator to each entry via the included
// template (which consumes the GB_CAST_OP macro defined above).
GrB_Info GB_tran__minv_fp64_int32
(
GrB_Matrix C,                      // output matrix
const GrB_Matrix A,                // input matrix, transposed into C
int64_t *restrict *Rowcounts,      // workspace: per-slice row counts
GBI_single_iterator Iter,          // iterator over A
const int64_t *restrict A_slice,   // partition of A for parallelism
int naslice                        // number of slices of A
)
{
#if GB_DISABLE
// operator disabled at compile time; caller uses the generic kernel instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
/*!
 * \brief Query the properties of the currently selected CUDA device.
 * \return the cudaDeviceProp structure for the active device
 */
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
// ceil(N / kBaseThreadNum), capped at the maximum grid dimension
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
/*! \brief Total GPU threads that would be launched to cover N elements
 *         (blocks for N times the per-block thread count). */
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
/*! \brief Recommended CPU (OpenMP) thread count; note the problem size N
 *         is ignored on the CPU path. */
template<>
inline int get_num_threads<cpu>(const int N) {
return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*! \brief operator request type switch */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
{ \
const OpReqType ReqType = kNullOp; \
{__VA_ARGS__} \
} \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else if (NDim == 6) { \
const int ndim = 6; \
{__VA_ARGS__} \
} else if (NDim == 7) { \
const int ndim = 7; \
{__VA_ARGS__} \
} else if (NDim == 8) { \
const int ndim = 8; \
{__VA_ARGS__} \
} else if (NDim == 9) { \
const int ndim = 9; \
{__VA_ARGS__} \
} else if (NDim == 10) { \
const int ndim = 10; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt8: \
LOG(FATAL) << "This operation does not " \
"support int8 or uint8"; \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
LOG(FATAL) << "This operation does not " \
"support float16"; \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Maps a data type to the type used for accumulation; identity by default.
template <typename T>
struct AccType {
using type = T;
};
// Half-precision values are accumulated in float to limit rounding error.
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
// Dtype dispatch that also typedefs `AType`, a (possibly wider) floating-point
// accumulation type: float32->double, float64->double, float16->float.
// Integer dtypes are rejected with LOG(FATAL) (the typedefs in those branches
// exist only so the expanded user code still compiles).
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not uint8"; \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int8_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types not int8"; \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int32_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int32"; \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
LOG(FATAL) << "This operation only support " \
"floating point types, not int64"; \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Dtype dispatch with a wider accumulation type `AType` for every dtype,
// including integers (uint8->uint32, int8->int32, int32->int64); unlike
// MXNET_REAL_ACC_TYPE_SWITCH, integer dtypes are accepted here.
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
typedef double AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
typedef float AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Dtype dispatch that only accepts integer dtypes (uint8/int8/int32/int64);
// floating-point dtypes hit LOG(FATAL). Note kBool is not handled and falls
// through to the "Unknown type enum" default.
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float32"; \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float64"; \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
LOG(FATAL) << "This operation only support " \
"integer types, not float16"; \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Unknown type enum " << type; \
}
// Restricted dtype dispatch accepting only float32/float64/float16/uint8;
// any other enum is a fatal "Invalid loading enum type" error.
// Presumably used for formats whose on-disk types are limited -- confirm at
// the call sites.
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \
switch (type) { \
case mshadow::kFloat32: \
{ \
typedef float DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat64: \
{ \
typedef double DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kFloat16: \
{ \
typedef mshadow::half::half_t DType; \
{__VA_ARGS__} \
} \
break; \
case mshadow::kUint8: \
{ \
typedef uint8_t DType; \
{__VA_ARGS__} \
} \
break; \
default: \
LOG(FATAL) << "Invalid loading enum type " << type; \
}
/*!
 * \brief Assign \p val to \p out according to the write request \p req
 * (used inside Kernel::Launch map functions).
 * \param out lvalue to be written
 * \param req the assignment request: kNullOp skips the write,
 *            kWriteTo / kWriteInplace overwrite, kAddTo accumulates;
 *            any other value is silently ignored (default: break)
 * \param val the value to be assigned to out
 */
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
// Chain of .add_enum registrations mapping every numeric dtype name to its
// mshadow enum, for use in dmlc parameter declarations.
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
// Same as MXNET_ADD_ALL_TYPES plus the "bool" dtype.
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
/* \brief Flatten an ndim-dimensional coordinate into a linear (row-major)
 * index. An axis where shape[i] <= coord[i] contributes 0 -- presumably to
 * support broadcast (size-1) axes; confirm against callers. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t flat = 0;
  #pragma unroll
  for (int d = 0; d < ndim; ++d) {
    const index_t c = (shape[d] > coord[d]) ? coord[d] : 0;
    flat = flat * shape[d] + c;
  }
  return flat;
}
/* Inverse of ravel: recover the (row-major) coordinate corresponding to a
 * flattened index for the given shape. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  index_t rem = idx;
  #pragma unroll
  for (index_t d = ndim - 1; d >= 0; --d) {
    const index_t q = rem / shape[d];
    coord[d] = rem - q * shape[d];  // remainder, i.e. rem % shape[d]
    rem = q;
  }
  return coord;
}
/* Inner product of a coordinate vector with a stride vector, yielding the
 * strided linear offset of that coordinate. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t offset = 0;
  #pragma unroll
  for (int d = 0; d < ndim; ++d) {
    offset += coord[d] * stride[d];
  }
  return offset;
}
/* Fused unravel + dot: compute the strided offset for flattened index idx
 * without materializing the intermediate coordinate. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t offset = 0;
  index_t rem = idx;
  #pragma unroll
  for (index_t d = ndim - 1; d >= 0; --d) {
    const index_t q = rem / shape[d];
    offset += (rem - q * shape[d]) * stride[d];  // (rem % shape[d]) * stride[d]
    rem = q;
  }
  return offset;
}
/* Row-major strides for a shape; axes of extent <= 1 get stride 0 so that
 * indexing along them re-reads the same element (broadcast semantics). */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t running = 1;
  #pragma unroll
  for (int d = ndim - 1; d >= 0; --d) {
    stride[d] = (shape[d] > 1) ? running : 0;
    running *= shape[d];
  }
  return stride;
}
/* Increment coordinates and modify index */
// Odometer-style advance: bump the innermost coordinate and propagate carries
// outward, keeping the strided linear index `idx` in sync at each step.
// Assumes the increment does not carry out of axis 0.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
  index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];  // last axis varies fastest
  *idx += stride[ndim-1];
  #pragma unroll
  // On overflow of axis i: reset it, advance axis i-1, and patch idx by the
  // net stride change (one step out on i-1 minus shape[i] steps back on i).
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}
/* Increment coordinates and modify index */
// Same odometer advance as the single-index overload above, but keeps two
// strided indices (e.g. for an input and an output with different strides)
// in sync with the shared coordinate.
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
  index_t* idx1, const Shape<ndim>& stride1,
  index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 * \note Sizes and device masks must match (CHECK_EQ aborts otherwise).
 *       Boolean blobs are handled separately and only support same-type
 *       copies; other dtype mismatches are cast element-wise via tcast.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  // bool is special-cased, presumably because MSHADOW_TYPE_SWITCH below does
  // not cover kBool -- confirm against the macro's definition.
  if (from.type_flag_ == mshadow::kBool || to.type_flag_ == mshadow::kBool) {
    CHECK_EQ(from.type_flag_, to.type_flag_) << "Only supports copying between boolean ndarrays.";
    mshadow::Copy(to.FlatTo1D<xpu, bool>(s), from.FlatTo1D<xpu, bool>(s), s);
    return;
  }
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Differing dtypes: copy with an element-wise cast to the destination type.
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad: chain rule, multiplying the incoming
   * output gradient by GRAD_OP applied to the remaining arguments.
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};
/*! \brief Binary op backward gradient OP wrapper (tuned) */
// Identical Map; inheriting from `tunable` opts the op into the OMP
// auto-tuning path selected by Kernel<...>::Launch below.
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 * Every overload forwards to OP::Map and writes the result through
 * KERNEL_ASSIGN, so the compile-time `req` decides between skip /
 * overwrite / accumulate.
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
    const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }
  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }
  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }
  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
    const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }
  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
    const DType *input_1,
    const DType *input_2,
    const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
  /*! \brief input is one (non-bool) tensor with a boolean output tensor */
  template<typename DType,
    typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
    typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and two scalar value with a boolean output tensor */
  template<typename DType,
    typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};
// Primary template; specialized below for cpu (and for gpu under __CUDACC__).
template<typename OP, typename xpu>
struct Kernel;
/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Not worth spawning a team: run serially.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    // Note: passes `false` to GetRecommendedOMPThreadCount, unlike Launch().
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning (decides
   *         whether OMP pays off via tuned_op<...>::UseOMP); OP::Map still
   *         does the actual work
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   *        (the first of these is the destination pointer, forwarded by the
   *        tunable Launch overloads below)
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   * \note Here OP::Map receives (start, length) of a contiguous chunk rather
   *       than a single index.
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into omp_threads contiguous chunks; the last chunk is
      // clipped so start + length never exceeds N.
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   * \note Selected by SFINAE when OP itself derives from `tunable`.
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }
  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   * \note Selected by SFINAE when OP::Operation (the wrapped op) is tunable.
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};
#ifdef __CUDACC__
// Grid-stride loop: each thread handles indices i, i + blockDim*gridDim, ...
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}
// Same as above but calls the (start, length) form of OP::Map with length 1,
// matching the LaunchEx contract of the CPU specialization.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel
   *  \note N is `int` here (the CPU variant takes size_t) -- NOTE(review):
   *  confirm callers never exceed INT_MAX iterations on GPU. */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }
  /*! \brief Launch the (start, length) variant of the kernel. */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__
/*!
 * \brief Fill kernel writing the compile-time constant \p val.
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // Direct form, for use with Kernel<>::Launch(): write val into out[i].
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = static_cast<DType>(val);
  }
  // Nullary form, for use through op_with_req<>: just produce the constant;
  // the wrapper performs the req-dependent assignment.
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
test.c | #include <stdio.h>
#include <math.h>
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include "header.h"
#include "TC_interface.h"
void read_initial_conditions(const char* filename, int NUM, double** y_host, double** variable_host);
/*
 * Thread-safety check for the TC analytical Jacobian: compute every Jacobian
 * once serially (reference) and once with 10 OpenMP threads, then report the
 * maximum absolute and relative (%) deviation over all nonzero entries.
 */
int main(int argc, char *argv[])
{
  char mechfile[100] = "h2.dat";
  char thermofile[100] = "h2therm.dat";
  int num_odes = 100100;
  omp_set_num_threads(1);
  double* y_host;
  double* var_host;
  read_initial_conditions("data.bin", num_odes, &y_host, &var_host);
  /* Initialize TC library (withtab = 0: no tabulation). */
  int withtab = 0;
  TC_initChem( mechfile, thermofile, withtab, 1.0);
  /* Total Jacobian entries across all states; size_t math avoids int
     overflow when NSP * NSP * num_odes exceeds INT_MAX. */
  const size_t total = (size_t)NSP * NSP * (size_t)num_odes;
  /* Reference Jacobians, computed serially. */
  double* save_jac = calloc(total, sizeof(double));
  if (save_jac == NULL)
  {
    fprintf(stderr, "Out of memory allocating reference Jacobians\n");
    return 1;
  }
  for(int tid = 0; tid < num_odes; ++tid)
  {
    double jac[NSP * NSP] = {0};
    TC_setThermoPres(var_host[tid]) ;
    TC_getJacTYNm1anl ( &y_host[tid * NN], NSP, jac ) ;
    /* save jacobian */
    memcpy(&save_jac[(size_t)tid * NSP * NSP], jac, NSP * NSP * sizeof(double));
  }
  /* Re-run with multiple threads to expose any shared state inside TC. */
  omp_set_num_threads(10);
  double* save_jac_multithread = calloc(total, sizeof(double));
  if (save_jac_multithread == NULL)
  {
    fprintf(stderr, "Out of memory allocating multithreaded Jacobians\n");
    free(save_jac);
    return 1;
  }
  #pragma omp parallel for
  for(int tid = 0; tid < num_odes; ++tid)
  {
    double jac[NSP * NSP] = {0};
    TC_setThermoPres(var_host[tid]) ;
    TC_getJacTYNm1anl ( &y_host[tid * NN], NSP, jac ) ;
    /* save jacobian */
    memcpy(&save_jac_multithread[(size_t)tid * NSP * NSP], jac, NSP * NSP * sizeof(double));
  }
  /* Compute max error over nonzero reference entries (errors stay -1 if
     every reference entry is zero). */
  double max_err = -1;
  double max_err_rel = -1;
  for(size_t i = 0; i < total; ++i)
  {
    if (save_jac[i] != 0.0)
    {
      double err = fabs(save_jac[i] - save_jac_multithread[i]);
      if (err > max_err)
        max_err = err;
      err = err * 100.0 / fabs(save_jac[i]);
      if (err > max_err_rel)
        max_err_rel = err;
    }
  }
  printf("Maximum absolute error: %e\nMaximum relative error: %e\n", max_err, max_err_rel);
  free(y_host);
  free(var_host);
  free(save_jac);
  free(save_jac_multithread);
  return 0;
}
|
tasks.c | #include <stdio.h>
#include <omp.h>
#define N (2)
int A=0;
/*
 * Offload smoke test: inside a target teams distribute region, each team runs
 * a parallel region in which a single thread spawns two tasks that write the
 * global A.
 */
int main(void) {
  int num_teams = 2;
  fprintf(stderr, "Using num_teams %d\n", num_teams);
#pragma omp target teams distribute num_teams(num_teams) map(tofrom:A)
  for (int k=0;k < num_teams; k++) {
#pragma omp parallel
    for (int i=0; i < N; i++) {
      /* BUG FIX: was "#pragma single" -- not a valid OpenMP directive, so it
         was silently ignored and every thread of the parallel region created
         the tasks. "#pragma omp single" restricts task creation to one
         thread per team, as intended. */
#pragma omp single
      {
#pragma omp task
        for (int j=0; j <N; j++) {
          A = 1;//printf("Howdy task0\n");
        }
#pragma omp task
        for (int j=0; j <N; j++) {
          A=2;//printf("Howdy task1\n");
        }
      }
    }
  }
  printf("Succeeded\n");
  return 0;
}
|
GB_binop__div_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int8)
// A*D function (colscale): GB (_AxD__div_int8)
// D*A function (rowscale): GB (_DxB__div_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int8)
// C=scalar+B GB (_bind1st__div_int8)
// C=scalar+B' GB (_bind1st_tran__div_int8)
// C=A+scalar GB (_bind2nd__div_int8)
// C=A'+scalar GB (_bind2nd_tran__div_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8)
// Storage types for this kernel: A, B and C are all int8_t.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fixed: a stray trailing line-continuation after the 0 spliced the
// following comment line into the macro body)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: signed 8-bit integer division; the row/column indices
// i and j are ignored by this (positionless) operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the actual loop lives in the
// included template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_accum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation).
void GB (_Cdense_ewise3_noaccum__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE), which
// tells the caller to fall back to the generic implementation.
GrB_Info GB (_Cdense_accumB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense
// matrix.
GrB_Info GB (_Cdense_accumb__div_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the inner block returns); generated-code boilerplate
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the results directly into C->x
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked); entries present in only one of A or
// B are copied (or, for eWiseUnion, combined with the alpha/beta scalars).
GrB_Info GB (_AaddB__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
// the scalar inputs are only dereferenced for eWiseUnion; for plain
// eWiseAdd they may be NULL and the scalars stay uninitialized/unused
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, where C is
// sparse/hypersparse.
GrB_Info GB (_AemultB_08__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.
GrB_Info GB (_AemultB_02__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (GB_BINOP_FLIP is 0 for div, so this branch is compiled.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__div_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = x / Bx [p] for every entry present in B
// (entries are skipped where the bitmap Bb marks them absent).
GrB_Info GB (_bind1st__div_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t x = (*((int8_t *) x_input)) ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, pB)) continue ;
int8_t bij = GBX (Bx, pB, false) ;
Cx [pB] = GB_IDIV_SIGNED (x, bij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = div (aij, y) with the scalar y bound as the
// second argument, over every entry present in A (bitmap/full).
GrB_Info GB (_bind2nd__div_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, and the bound scalar
int8_t *Cz = (int8_t *) Cx_output ;
const int8_t *Az = (const int8_t *) Ax_input ;
const int8_t yval = (*((const int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// compute only the entries present in the bitmap/full pattern of A
if (GBB (Ab, k))
{
int8_t akj = GBX (Az, k, false) ;
Cz [k] = GB_IDIV_SIGNED (akj, yval, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = div (x, aij), with
// the scalar x bound first.  No typecasting occurs (in spite of the macro
// name); both sides are int8_t.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \
}
// C = op (x, A'): transpose A and apply the binary operator with bound x.
GrB_Info GB (_bind1st_tran__div_int8)
(
GrB_Matrix C,
const GB_void *x_input,          // the bound scalar x
const GrB_Matrix A,
int64_t *restrict *Workspaces,   // per-task workspaces for the transpose
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent kernels in this file (preprocessor-time
// bookkeeping; position after the returns is irrelevant to the compiler)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = div (aij, y), with
// the scalar y bound second.  No typecasting occurs (in spite of the macro
// name); both sides are int8_t.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \
}
// C = op (A', y): transpose A and apply the binary operator with bound y.
GrB_Info GB (_bind2nd_tran__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,          // the bound scalar y
int64_t *restrict *Workspaces,   // per-task workspaces for the transpose
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
if97_lib_test.c | // Copyright Martin Lord 2014-2015.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/* *****************************************************************************
* A SHORT PROGRAMME TO CHECK THE LIBRARY
* *******************************************************************************/
#include "if97_lib_test.h"
#include "if97_lib.h"
#include "IF97_common.h"
#include "iapws_surftens.h"
#include "solve_test.h"
#include "IF97_Region1_test.h"
#include "IF97_Region2_test.h"
#include "IF97_Region3_test.h"
#include "IF97_Region4_test.h"
#include "IF97_B23_test.h"
#include <stdio.h>
#include <math.h> // for fabs
#include "winsteam_compatibility.h"
// Run every IF97 module's self-test, writing detailed results to the log
// file at TESTLOGLOC.  Returns 0 on completion, 1 if the log cannot be
// opened.  (Individual test failures are reported in the log, not via the
// exit code, preserving the original behavior.)
int main (int argc, char **argv)
{
    (void) argc;   // command-line arguments are not used
    (void) argv;

    int intermediateResult = TEST_FAIL;

    // Open an empty file to write test results to.  Without this check every
    // fprintf below would dereference a NULL FILE* if the open failed.
    FILE *pTestLog = fopen(TESTLOGLOC, "w");
    if (pTestLog == NULL) {
        fprintf(stderr, "ERROR: could not open test log file for writing\n");
        return 1;
    }

    // check threading using OpenMP test: one "thread" line per OMP thread
    fprintf(pTestLog, "\n\n************************************************************ \n");
    fprintf(pTestLog, "*** CHECK FOR MULTITHREADDING ***\n\n");
    #pragma omp parallel
    {
        fprintf(pTestLog, "thread\n");
    }

    // *** test region 1 module ***
    intermediateResult = if97_region1_test(pTestLog);
    resultSummary("IF97_Region1 module", pTestLog, intermediateResult);

    // *** test region 2 module ***
    intermediateResult = if97_region2_test(pTestLog);
    resultSummary("IF97_Region2 module", pTestLog, intermediateResult);

    // *** test region 2-3 boundary line equation ***
    intermediateResult = if97_B23_test(pTestLog);
    resultSummary("IF97_B23 module", pTestLog, intermediateResult);

    // *** test region 3 module ***
    intermediateResult = if97_region3_test(pTestLog);
    resultSummary("IF97_Region3 module", pTestLog, intermediateResult);

    // *** test region 4 module ***
    intermediateResult = if97_region4_test(pTestLog);
    resultSummary("IF97_Region4 module", pTestLog, intermediateResult);

    // *** test region 5 module ***
    intermediateResult = if97_region5_test(pTestLog);
    resultSummary("IF97_Region5 module", pTestLog, intermediateResult);

    //**** test the solver *****
    //intermediateResult = solve_test (pTestLog);
    //resultSummary ("solver module", pTestLog, intermediateResult);

    // *** Testing iapws_surftens ******
    intermediateResult = if97_surftens_test(pTestLog);
    resultSummary("iapws_surftens", pTestLog, intermediateResult);

    // *** test library module ***
    intermediateResult = if97_lib_test(pTestLog);
    resultSummary("IF97_lib library", pTestLog, intermediateResult);

    // fclose flushes buffered results; report (but do not fail on) an error
    if (fclose(pTestLog) == EOF)
        perror("Error closing test log");

    // Pass TESTLOGLOC through "%s" rather than as the format string itself,
    // so a '%' in the path cannot be misinterpreted as a conversion spec.
    printf("Test log results can be found in %s\n", TESTLOGLOC);
    return 0;
}
// test a double input function. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is set to true
// Test a single-input function.  Pass = 0.  See IF97_Common.h for failure
// codes.  tolType selects how tol is interpreted: SIG_FIG (tol = number of
// significant figures), ABS (absolute error), or PERCENT (percentage error).
// Outputs more detail to logFile if VERBOSE_TEST is set to true.
int testSingleInput ( double (*func) (double), double input, double expectedOutput, double tol, int tolType, char* funcName, FILE *logFile)
{
    int testResult = TEST_FAIL;   // initialize as a fail
    double actual = (*func)(input);   // evaluate the function once, not once per fprintf
    double error;

    switch (tolType) {
    case SIG_FIG:
        error = fabs((actual - expectedOutput) / expectedOutput);
        if (error <= pow(10, -tol)) {
            testResult = TEST_PASS;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g) = %.8g \t Expected: %.8g \t Error ratio : %e \t PASS\n", funcName, input, actual, expectedOutput, error);
        }
        else {
            testResult = testResult | TEST_INCORRECT;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g ) = %.8g \t Expected: %.8g \t Error ratio : %e \t FAIL\n", funcName, input, actual, expectedOutput, error);
        }
        break;
    case ABS:
        error = fabs(actual - expectedOutput);
        if (error <= tol) {
            testResult = TEST_PASS;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t PASS\n", funcName, input, actual, expectedOutput, error);
        }
        else {
            testResult = testResult | TEST_INCORRECT;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t FAIL\n", funcName, input, actual, expectedOutput, error);
        }
        break;
    case PERCENT:
        error = fabs((actual - expectedOutput) / expectedOutput) * 100;
        if (error <= tol) {
            testResult = TEST_PASS;
            if (VERBOSE_TEST)
                // "%%" prints a literal '%'; a lone "% " is undefined behavior
                fprintf(logFile, "%s ( %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t PASS\n", funcName, input, actual, expectedOutput, error);
        }
        else {
            testResult = testResult | TEST_INCORRECT;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t FAIL\n", funcName, input, actual, expectedOutput, error);
        }
        break;
    default:
        // unknown tolerance type: leave testResult as TEST_FAIL
        break;
    }
    return testResult;
}
// test a double input function. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is true
// Test a two-input function.  Pass = 0.  See IF97_Common.h for failure
// codes.  tolType selects how tol is interpreted: SIG_FIG (tol = number of
// significant figures), ABS (absolute error), or PERCENT (percentage error).
// Outputs more detail to logFile if VERBOSE_TEST is true.
int testDoubleInput ( double (*func) (double, double), double input1, double input2, double expectedOutput, double tol, int tolType, char* funcName, FILE *logFile)
{
    int testResult = TEST_FAIL;   // initialize as a fail
    double actual = (*func)(input1, input2);   // evaluate the function once, not once per fprintf
    double error;

    switch (tolType) {
    case SIG_FIG:
        error = fabs((actual - expectedOutput) / expectedOutput);
        if (error <= pow(10, -tol)) {
            testResult = TEST_PASS;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error ratio : %e \t PASS\n", funcName, input1, input2, actual, expectedOutput, error);
        }
        else {
            testResult = testResult | TEST_INCORRECT;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error ratio : %e \t FAIL\n", funcName, input1, input2, actual, expectedOutput, error);
        }
        break;
    case ABS:
        error = fabs(actual - expectedOutput);
        if (error <= tol) {
            testResult = TEST_PASS;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t PASS\n", funcName, input1, input2, actual, expectedOutput, error);
        }
        else {
            testResult = testResult | TEST_INCORRECT;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.8g \t Error : %e \t FAIL\n", funcName, input1, input2, actual, expectedOutput, error);
        }
        break;
    case PERCENT:
        error = fabs((actual - expectedOutput) / expectedOutput) * 100;
        if (error <= tol) {
            testResult = TEST_PASS;
            if (VERBOSE_TEST)
                // "%%" prints a literal '%'; a lone "% " is undefined behavior
                fprintf(logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t PASS\n", funcName, input1, input2, actual, expectedOutput, error);
        }
        else {
            testResult = testResult | TEST_INCORRECT;
            if (VERBOSE_TEST)
                fprintf(logFile, "%s ( %.8g, %.8g ) = %.8g \t Expected: %.4g \t Error : %e %% \t FAIL\n", funcName, input1, input2, actual, expectedOutput, error);
        }
        break;
    default:
        // unknown tolerance type: leave testResult as TEST_FAIL
        break;
    }
    return testResult;
}
// test a double input function with boolean output. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is set to true
// Test a two-input function with boolean output.  Pass = 0.  See
// IF97_Common.h for failure codes.  Outputs more detail to logFile if
// VERBOSE_TEST is set to true.
int testBoolDoubleInput ( bool (*func) (double, double), double input1, double input2, bool expectedOutput, char* funcName, FILE *logFile){
    int testResult = TEST_FAIL;   // initialize as a fail
    bool actual = (*func)(input1, input2);   // call once; compare and log the same value
    if (actual == expectedOutput) {
        testResult = TEST_PASS;
        if (VERBOSE_TEST)
            fprintf(logFile, "%s ( %.8g, %.8g ) = %s \t Expected: %s \t \t PASS\n", funcName, input1, input2, actual ? "true" : "false", expectedOutput ? "true" : "false");
    }
    else {
        testResult = testResult | TEST_INCORRECT;
        if (VERBOSE_TEST)
            fprintf(logFile, "%s ( %.8g, %.8g ) = %s \t Expected: %s \t \t FAIL\n", funcName, input1, input2, actual ? "true" : "false", expectedOutput ? "true" : "false");
    }
    return testResult;
}
// test a double input function with boolean output. Pass = 0. See IF97_Common.h for failure codes.
//Function outputs more detail to logfile if VERBOSE_TEST is set to true
// Test a two-input function with char output.  Pass = 0.  See IF97_Common.h
// for failure codes.  Outputs more detail to logFile if VERBOSE_TEST is set
// to true.
int testCharDoubleInput ( char (*func) (double, double), double input1, double input2, char expectedOutput, char* funcName, FILE *logFile){
    int testResult = TEST_FAIL;   // initialize as a fail
    char actual = (*func)(input1, input2);   // call once; compare and log the same value
    if (actual == expectedOutput) {
        testResult = TEST_PASS;
        if (VERBOSE_TEST)
            fprintf(logFile, "%s ( %.8g, %.8g ) = %c \t Expected: %c \t \t PASS\n", funcName, input1, input2, actual, expectedOutput);
    }
    else {
        testResult = testResult | TEST_INCORRECT;
        if (VERBOSE_TEST)
            fprintf(logFile, "%s ( %.8g, %.8g ) = %c \t Expected: %c \t \t FAIL\n", funcName, input1, input2, actual, expectedOutput);
    }
    return testResult;
}
// prints a unit test summary to the log based on the test code
// Print a unit-test summary to the log based on the (bitmask) test code:
// TEST_PASS alone prints PASS; otherwise one message per failure bit set.
void resultSummary (char* funcName, FILE *logFile, int testCode)
{
    // failure-bit -> message table, checked in order
    static const struct { int flag; const char *msg; } failMsgs[] = {
        { TEST_FAIL,       "FAIL\n" },
        { TEST_INCORRECT,  "Some or all elements generate incorrect results\n" },
        { TEST_INCOMPLETE, "Some or all elements are not yet complete\n" },
    };

    fprintf(logFile, "\nResults summary for %s : Error code %i \n", funcName, testCode);
    if (testCode == TEST_PASS) {
        fprintf(logFile, "PASS");
    }
    else {
        for (size_t i = 0; i < sizeof failMsgs / sizeof failMsgs[0]; i++) {
            if (testCode & failMsgs[i].flag)
                fprintf(logFile, "%s", failMsgs[i].msg);
        }
    }
    fprintf(logFile, "\n\n");
}
// *** Testing iapws_surftens ******
// Check iapws_surftens against the reference values of IAPWS R1-76(2014)
// table 1, using the largest tabulated difference as the absolute tolerance.
// Returns 0 on pass; otherwise a bitmask of failure codes (IF97_common.h).
int if97_surftens_test (FILE *logFile){
    const double If97_surf_acc = 0.07; // Highest difference in table 1 of IAPWS R1-76(2014)
    int intermediateResult = TEST_PASS;

    fprintf ( logFile, "\n\n *** Testing iapws_surftens *** \n\n" );

    // reference points: temperature in deg C -> surface tension (mN/m)
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 0.01, 75.65, If97_surf_acc, ABS, "iapws_surftens", logFile);
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 20.0, 72.74, If97_surf_acc, ABS, "iapws_surftens", logFile);
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 100.0, 58.91, If97_surf_acc, ABS, "iapws_surftens", logFile);
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 200.0, 37.67, If97_surf_acc, ABS, "iapws_surftens", logFile);
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 300.0, 14.36, If97_surf_acc, ABS, "iapws_surftens", logFile);
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, 370.0, 0.39, If97_surf_acc, ABS, "iapws_surftens", logFile);
    // at the critical point the surface tension vanishes
    intermediateResult = intermediateResult | testSingleInput (iapws_surftens, IF97_TC - IF97_T_TRIP, 0.0, If97_surf_acc, ABS, "iapws_surftens", logFile);

    // any incorrect result also sets the overall FAIL bit
    if (intermediateResult != 0)
        intermediateResult= intermediateResult | TEST_FAIL;
    return intermediateResult;
}
// Self-test of the top-level library interface (if97_pt_* property lookups),
// including spot checks just above the critical point.  Returns 0 on pass;
// otherwise a bitmask of failure codes (IF97_common.h).
int if97_lib_test (FILE *logFile){
    int intermediateResult;

    // *** Testing if97_pt_h ******
    intermediateResult = TEST_PASS;
    fprintf ( logFile, "\n\n *** Testing if97_pt_h *** \n\n" );

    // reference points from the IF97 verification tables (p in MPa, T in K)
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, 3.0, 300.0, 1.15331273e02, TEST_ACCURACY, SIG_FIG, "if97_pt_h", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, 80.0, 300.0, 1.84142828e02, TEST_ACCURACY, SIG_FIG,"if97_pt_h", logFile);

    // diagnostic output just above the critical point (region 3 backwards eqns)
    fprintf ( logFile, "\n1/if97_R3bw_v_pt(%.9g, %.9g) = %.9g", IF97_PC + 0.000001, IF97_TC + 0.000001, 1/if97_R3bw_v_pt(IF97_PC + 0.000001, IF97_TC + 0.000001));
    fprintf ( logFile, "\n1/if97_pt_v(%.9g, %.9g) = %.9g\n", IF97_PC + 0.000001, IF97_TC + 0.000001, 1/if97_pt_v(IF97_PC + 0.000001, IF97_TC + 0.000001));

    intermediateResult = intermediateResult | testDoubleInput (if97_pt_v, IF97_PC + 0.000001, IF97_TC + 0.000001, 1/324.23752306, TEST_ACCURACY, SIG_FIG,"if97_pt_v", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, IF97_PC + 0.000001, IF97_TC + 0.000001, 2083.817978619541, TEST_ACCURACY, SIG_FIG,"if97_pt_h", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_h, 30.0, 1500.0, 5.16723514e03,TEST_ACCURACY, SIG_FIG,"if97_pt_h", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_u, IF97_PC + 0.000001, IF97_TC + 0.000001, 2015.769096450988, TEST_ACCURACY, SIG_FIG,"if97_pt_u", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_s, IF97_PC + 0.000001, IF97_TC + 0.000001, 4.406259014859, TEST_ACCURACY, SIG_FIG,"if97_pt_s", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_Cv, IF97_PC + 0.000001, IF97_TC + 0.000001, 4.527598110108, TEST_ACCURACY, SIG_FIG,"if97_pt_Cv", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_Cp, IF97_PC + 0.000001, IF97_TC + 0.000001, 4.54022408404e5, TEST_ACCURACY, SIG_FIG,"if97_pt_Cp", logFile);
    intermediateResult = intermediateResult | testDoubleInput (if97_pt_Vs, IF97_PC + 0.000001, IF97_TC + 0.000001, 314.252078309417, TEST_ACCURACY, SIG_FIG,"if97_pt_Vs", logFile);

    resultSummary ("if97_pt_h", logFile, intermediateResult);

    // any incorrect result also sets the overall FAIL bit
    if (intermediateResult != 0)
        intermediateResult= intermediateResult | TEST_FAIL;
    return intermediateResult;
}
positions.c | #include "globals.h"
// Sample particle positions for the initial conditions.  Two strategies:
// PEANO_SAMPLING walks a Peano-Hilbert curve and accepts one particle per
// cell with probability proportional to the local density; otherwise each
// particle is drawn uniformly in the box, optionally rejection-sampled
// against Density_Func_Ptr (REJECTION_SAMPLING).
void Make_Positions()
{
    printf ( "Sampling positions ...\n" );
    fflush ( stdout );
    //! @todo do 2D properly in here and peanowalk, too
#ifdef PEANO_SAMPLING
    const uint64_t countCoords = peanoCurveLength();
    const uint64_t cellSize = peanoCellSize();
    const double halfCellSize = 0.5 * cellSize;
    const double norm = peanoNormFactor();
    // physical side lengths of one Peano cell in each dimension
    double cellSides[3];
    cellSides[0] = cellSize * Problem.Boxsize[0] * norm;
    cellSides[1] = cellSize * Problem.Boxsize[1] * norm;
    cellSides[2] = cellSize * Problem.Boxsize[2] * norm;
    const double cellVolume = cellSides[0] * cellSides[1] * cellSides[2];
    // acceptance probability per cell = rho * cellVolume / Mpart
    const double probabilityFactor = cellVolume / ( Problem.Mpart );
    double probabilitySum = 0.0;
    printf ( " Have %lu peano cells of volume (%g, %g, %g) for %d particles\n", countCoords, cellSides[0], cellSides[1], cellSides[2], Param.Npart );
    Assert ( countCoords > Param.Npart, "Need more peano cells than particles\n" );
    int ipart = 0;
    for ( uint64_t peano = 0; peano < countCoords; ++peano ) {
        // place the next (candidate) particle at the center of this cell
        assignPeanoCoordinates ( P[ipart].Pos, peano );
        translateAndRenormalizePeanoCoords ( P[ipart].Pos, halfCellSize, norm );
        const double probability = probabilityFactor * Density_Func_Ptr ( ipart, Param.BiasCorrection );
        probabilitySum += probability;
        //Accept particle
        if ( ipart < Param.Npart && probability > erand48 ( Omp.Seed ) ) {
            //randomize position inside peano cell
            P[ipart].Pos[0] += ( erand48 ( Omp.Seed ) - 0.5 ) * cellSides[0];
            P[ipart].Pos[1] += ( erand48 ( Omp.Seed ) - 0.5 ) * cellSides[1];
            P[ipart].Pos[2] += ( erand48 ( Omp.Seed ) - 0.5 ) * cellSides[2];
            ++ipart;
            if ( ipart == Param.Npart ) {
                printf ( " Aborting at %lu of %lu peano nodes (%g%%)\n", peano, countCoords, peano * 100. / countCoords );
                //Let it run further to get correct probabilitySum values
            }
        }
    }
    if ( ipart != Param.Npart ) {
        // fewer particles sampled than requested: shrink Npart and rescale
        // the particle mass so the total mass is preserved
        Problem.Mpart = Problem.Mpart * ipart / Param.Npart;
        Param.Npart = ipart; //If got less particles we can just ignore all memory beyond this point in the structs
        printf ( " Resetting particle number to %d and particle mass to %g\n", Param.Npart, Problem.Mpart );
    }
    // Normalization: Sum_cells p = 1 * Npart
    printf ( " Sum of probabilities %g (%g%%)\n", probabilitySum, probabilitySum * 100.0 / Param.Npart );
#else
    // NOTE(review): erand48(Omp.Seed) inside the parallel region assumes
    // Omp.Seed is threadprivate (per-thread state) — verify; with a shared
    // seed this is a data race.
    #pragma omp parallel for
    for ( int ipart = 0; ipart < Param.Npart; ipart++ ) {
#ifdef REJECTION_SAMPLING
        // redraw positions until a uniform deviate under Rho_Max falls below
        // the local density (classic rejection sampling)
        double rho = 0.0, rho_r = 0.0;
        while ( rho >= rho_r ) {
            P[ipart].Pos[0] = erand48 ( Omp.Seed ) * Problem.Boxsize[0];
            P[ipart].Pos[1] = erand48 ( Omp.Seed ) * Problem.Boxsize[1];
#ifdef TWO_DIM
            P[ipart].Pos[2] = 0.0;
#else
            P[ipart].Pos[2] = erand48 ( Omp.Seed ) * Problem.Boxsize[2];
#endif //TWO_DIM
            rho = Problem.Rho_Max * erand48 ( Omp.Seed );
            rho_r = Density_Func_Ptr ( ipart , Param.BiasCorrection );
        }
#else
        // uniform sampling over the box
        P[ipart].Pos[0] = erand48 ( Omp.Seed ) * Problem.Boxsize[0];
        P[ipart].Pos[1] = erand48 ( Omp.Seed ) * Problem.Boxsize[1];
#ifdef TWO_DIM
        P[ipart].Pos[2] = 0.0;
#else
        P[ipart].Pos[2] = erand48 ( Omp.Seed ) * Problem.Boxsize[2];
#endif //TWO_DIM
#endif //REJECTION_SAMPLING
        P[ipart].Type = 0;
    }
#endif //PEANO_SAMPLING
    printf ( "done\n" );
    return;
}
// Assign particle velocities via the problem's velocity function pointer.
void Make_Velocities()
{
    printf ( "Velocities ..." );
    fflush ( stdout );

    #pragma omp parallel for
    for ( int i = 0; i < Param.Npart; i++ ) {
        Velocity_Func_Ptr ( i, P[i].Vel );
#ifdef TWO_DIM
        P[i].Vel[2] = 0.0;   // no motion out of the plane in 2D
#endif //TWO_DIM
    }

    printf ( " done\n" );
}
// Assign specific internal energy to every SPH particle via the problem's
// internal-energy function pointer.
void Make_Temperatures()
{
    printf ( "Internal Energy ..." );
    fflush ( stdout );

    #pragma omp parallel for
    for ( int i = 0; i < Param.Npart; i++ )
        SphP[i].U = U_Func_Ptr ( i );

    printf ( " done\n" );
}
// Assign magnetic field vectors via the problem's field function pointer.
void Make_Magnetic_Fields()
{
    printf ( "Magnetic Field ..." );
    fflush ( stdout );

    #pragma omp parallel for
    for ( int i = 0; i < Param.Npart; i++ ) {
        Magnetic_Field_Func_Ptr ( i, SphP[i].Bfld );
#ifdef TWO_DIM
        SphP[i].Bfld[2] = 0.0;   // no out-of-plane field component in 2D
#endif //TWO_DIM
    }

    printf ( " done\n" );
}
// Run the problem-specific post-processing hook.
void Make_PostProcessing()
{
    printf ( "Post Processing ..." );
    fflush ( stdout );
    PostProcessing_Func_Ptr ();
    printf ( " done\n" );
}
|
renumber.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include "HiParTI.h"
#include "sptensor.h"
#include "renumber.h"
/*Interface to everything in this file is orderit(.., ..)*/
/*function declarations*/
static void ptiLexiOrderPerMode(ptiSparseTensor * tsr, ptiIndex mode, ptiIndex ** orgIds, ptiElementIndex sb_bits, int tk, int impl_num);
void ptiBFSLike(ptiSparseTensor * tsr, ptiIndex ** newIndices);
/* Return the current wall-clock time in seconds (microsecond resolution),
 * for timing the reordering phases.  (The stray ';' after the closing brace
 * of the original was removed: an extra file-scope semicolon is not valid
 * strict ISO C.) */
static double u_seconds(void)
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return (double) tp.tv_sec + (double) tp.tv_usec / 1000000.0;
}
// Compute a renumbering of the indices of every mode of tsr.
// renumber == 1 runs `iterations` rounds of Lexi-order refinement on a copy
// of the tensor; renumber == 2 runs the older BFS-like scheme.
// newIndices is of size [nmodes][ndims[mode]], assumed allocated, and is
// overwritten with old-index -> new-index maps.  tsr itself is not modified.
void ptiIndexRenumber(ptiSparseTensor * tsr, ptiIndex ** newIndices, int renumber, ptiIndex iterations, ptiElementIndex sb_bits, int tk, int impl_num)
{
    /*
    newIndices is of size [nmodes][ndims[modes]] and assumed to be allocted.
    It will be overwritten. No need to initialize.
    We will need to reshuffle nonzeros. In order to not to touch tsr, we copy the indices of nonzeros
    to a local variable coords. This is sort of transposed wrt tsr: its size is nnz * n, instead of n * nnz used in tsr.
    */
    ptiIndex const nmodes = tsr->nmodes;
    ptiNnzIndex const nnz = tsr->nnz;   // NOTE(review): nnz and z below are currently unused here
    ptiIndex i, m;
    ptiNnzIndex z;
    ptiIndex its;

    if (renumber == 1) { /* Lexi-order renumbering */
        /* copy the indices so tsr itself is left untouched */
        ptiSparseTensor tsr_temp;
        ptiCopySparseTensor(&tsr_temp, tsr, tk);

        // orgIds[m][i] tracks which original index of mode m currently sits
        // at position i; initialized to the identity permutation
        ptiIndex ** orgIds = (ptiIndex **) malloc(sizeof(ptiIndex*) * nmodes);
        for (m = 0; m < nmodes; m++)
        {
            orgIds[m] = (ptiIndex *) malloc(sizeof(ptiIndex) * tsr->ndims[m]);
            // #pragma omp parallel for num_threads(tk) private(i)
            for (i = 0; i < tsr->ndims[m]; i++)
                orgIds[m][i] = i;
        }

        // FILE * debug_fp = fopen("new.txt", "w");
        // fprintf(stdout, "orgIds:\n");
        for (its = 0; its < iterations; its++)
        {
            printf("[Lexi-order] Optimizing the numbering for its %u\n", its+1);
            // refine each mode in turn; each pass reorders one mode's indices
            for (m = 0; m < nmodes; m++)
                ptiLexiOrderPerMode(&tsr_temp, m, orgIds, sb_bits, tk, impl_num);
            // fprintf(stdout, "\niter %u:\n", its);
            // for(ptiIndex m = 0; m < tsr->nmodes; ++m) {
            //     ptiDumpIndexArray(orgIds[m], tsr->ndims[m], stdout);
            // }
        }
        // fclose(debug_fp);

        /* compute newIndices from orgIds. Reverse perm */
        for (m = 0; m < nmodes; m++)
            for (i = 0; i < tsr->ndims[m]; i++)
                newIndices[m][orgIds[m][i]] = i;

        ptiFreeSparseTensor(&tsr_temp);
        for (m = 0; m < nmodes; m++)
            free(orgIds[m]);
        free(orgIds);
    } else if (renumber == 2 ) { /* BFS-like renumbering */
        /*
        REMARK (10 May 2018): this is the old bfs-like kind of thing. I hoped it would reduce the number of iterations,
        but on a few cases it did not help much. Just leaving it in case we want to use it.
        */
        printf("[BFS-like]\n");
        ptiBFSLike(tsr, newIndices);
    }
}
// Lexicographically order the n columns of an m x n sparse matrix given by
// row pointers ia[1..m+1] and column ids cols[...] (all 1-based), writing the
// resulting column permutation into cprm[1..n].  Implemented as partition
// refinement over doubly linked lists of column "supervariables" (colStruct /
// setStruct helpers); each row splits every touched set into the columns
// that do / do not appear in that row.
static void lexOrderThem(ptiNnzIndex m, ptiIndex n, ptiNnzIndex *ia, ptiIndex *cols, ptiIndex *cprm, int const tk)
{
    /*m, n are the num of rows and cols, respectively. We lex order cols,
    given rows.
    */
    ptiNnzIndex j, jcol, jend;
    ptiIndex jj;
    ptiIndex *freeIdList, freeIdTop;
    ptiIndex k, s, acol;
    ptiIndex firstset, set, pos, item, headset;

    colStruct *clms;
    setStruct *csets;

    // NOTE(review): calloc argument order is (count, size) by convention;
    // these calls swap them.  Total allocation is the same, but confirm
    // against the project's style.  Allocations are not checked for NULL.
    clms = (colStruct *) calloc(sizeof(colStruct), n+2);
    csets = (setStruct *) calloc(sizeof(setStruct), n+2);

    freeIdList = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));

    initColDLL(clms, n);
    initSetDLL(csets, n);

    // sentinel value 2*n marks "not yet assigned" positions in cprm
    for(jj = 1; jj<=n; jj++)
        cprm[jj] = 2 * n;

    firstset = 1;
    freeIdList[0] = 0;
    for(jj= 1; jj<=n; jj++)
        freeIdList[jj] = jj+1;/*1 is used as a set id*/
    freeIdTop = 1;

    for(j=1; j<=m; j++){
        jend = ia[j+1]-1;
        for(jcol = ia[j]; jcol <= jend ; jcol++){
            acol= cols[jcol];
            s = clms[acol].svar;
            if( csets[s].flag < j){/*first occurence of supervar s in j*/
                csets[s].flag = j;
                if(csets[s].sz == 1 && csets[s].tail != acol){
                    printf("this should not happen (sz 1 but tailset not ok)\n");
                    exit(12);
                }
                if(csets[s].sz > 1) {
                    ptiIndex newId;
                    /*remove acol from s*/
                    removeAColfromSet(csets, s, clms, acol);

                    /*create a new supervar ns=newId
                    and make i=acol its only var*/
                    if(freeIdTop == n+1) {
                        printf("this should not happen (no index)\n");
                        exit(12);
                    }
                    newId = freeIdList[freeIdTop++];
                    appendAColtoSet(csets, newId, clms, acol);
                    csets[s].var = acol; /*the new set's important var is acol*/
                    insertSetBefore(csets, newId, s);/*newId is before s*/

                    if(firstset == s)
                        firstset = newId;
                }
            }
            else{/*second or later occurence of s for row j*/
                k = csets[s].var;
                /*remove acol from its current chain*/
                removeAColfromSet(csets, s, clms, acol);
                if(csets[s].sz == 0){/*s is a free id now..*/
                    freeIdList[--freeIdTop] = s; /*add s to the free id list*/
                    setEmpty(csets, s);/*no need to adjust firstset, as this is the second occ of s*/
                }

                /*add to chain containing k (as the last element)*/
                appendAColtoSet(csets, clms[k].svar, clms, acol);
            }
        }
    }

    /*we are done. Let us read the cprm from the ordered sets*/
    pos = 1;
    for(set = firstset; set != 0; set = csets[set].next){
        item = csets[set].tail;
        headset = 0;
        // walk backwards from the tail to find the head of this set's chain
        while(item != 0 ){
            headset = item;
            item = clms[item].prev;
        }

        /*located the head of the set. output them (this is for keeping the order)*/
        while(headset){
            cprm[pos++] = headset;
            headset = clms[headset].next;
        }
    }

    free(freeIdList);
    free(csets);
    free(clms);

    // sanity check: every column must have been emitted exactly once
    if(pos-1 != n){
        printf("**************** Error ***********\n");
        printf("something went wrong and we could not order everyone\n");
        exit(12);
    }
    return ;
}
// static void lexOrderThem( ptiNnzIndex m, ptiIndex n, ptiNnzIndex *ia, ptiIndex *cols, ptiIndex *cprm, int const tk)
// {
// /*m, n are the num of rows and cols, respectively. We lex order cols,
// given rows.
// BU notes as of 4 May 2018: I am hoping that I will not be asked the details of this function, and its memory use;) A quick and dirty update from something else I had since some time. I did not think through if the arrays could be reduced. Right now we have 10 arrays of size n each (where n is the length of a single dimension of the tensor.
// */
// ptiNnzIndex *flag, j, jcol, jend;
// ptiIndex *svar, *var, numBlocks;
// ptiIndex *prev, *next, *sz, *setnext, *setprev, *tailset;
// ptiIndex *freeIdList, freeIdTop;
// ptiIndex k, s, acol;
// ptiIndex firstset, set, pos;
// svar = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
// flag = (ptiNnzIndex*) calloc(sizeof(ptiNnzIndex),(n+2));
// var = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
// prev = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
// next = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
// sz = (ptiIndex*) calloc(sizeof(ptiIndex),(n+2));
// setprev = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
// setnext = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
// tailset = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
// freeIdList = (ptiIndex*)calloc(sizeof(ptiIndex),(n+2));
// next[1] = 2;
// prev[0] = prev[1] = 0;
// next[n] = 0;
// prev[n] = n-1;
// svar[1] = svar[n] = 1;
// flag[1] = flag[n] = flag[n+1] = 0;
// cprm[1] = cprm[n] = 2 * n ;
// setprev[1] = setnext[1] = 0;
// // #pragma omp parallel for num_threads(tk)
// for(ptiIndex jj = 2; jj<=n-1; jj++)/*init all in a single svar*/
// {
// svar[jj] = 1;
// next[jj] = jj+1;
// prev[jj] = jj-1;
// flag[jj] = 0;
// sz[jj] = 0;
// setprev[jj] = setnext[jj] = 0;
// cprm[jj] = 2 * n;
// }
// var[1] = 1;
// sz[1] = n;
// sz[n] = sz[n+1] = 0;
// setprev[n] = setnext[n] = 0;
// setprev[n+1] = setnext[n+1] = 0;
// tailset[1] = n;
// firstset = 1;
// freeIdList[0] = 0;
// // #pragma omp parallel for num_threads(tk)
// for(ptiIndex jj= 1; jj<=n; jj++)
// freeIdList[jj] = jj+1;/*1 is used as a set id*/
// freeIdTop = 1;
// for(j=1; j<=m; j++)
// {
// jend = ia[j+1]-1;
// for(jcol = ia[j]; jcol <= jend ; jcol++)
// {
// acol= cols[jcol];
// s = svar[acol];
// if( flag[s] < j)/*first occurence of supervar s in j*/
// {
// flag[s] = j;
// if(sz[s] == 1 && tailset[s] != acol)
// {
// printf("this should not happen (sz 1 but tailset not ok)\n");
// exit(12);
// }
// if(sz[s] > 1)
// {
// ptiIndex newId;
// /*remove acol from s*/
// if(tailset[s] == acol) tailset[s] = prev[acol];
// next[prev[acol]] = next[acol];
// prev[next[acol]] = prev[acol];
// sz[s] = sz[s] - 1;
// create a new supervar ns=newId
// and make i=acol its only var
// if(freeIdTop == n+1) {
// printf("this should not happen (no index)\n");
// exit(12);
// }
// newId = freeIdList[freeIdTop++];
// svar[acol] = newId;
// var[newId] = acol;
// flag[newId] = j;
// sz[newId ] = 1;
// next[acol] = 0;
// prev[acol] = 0;
// var[s] = acol;
// tailset[newId] = acol;
// setnext[newId] = s;
// setprev[newId] = setprev[s];
// if(setprev[s])
// setnext[setprev[s]] = newId;
// setprev[s] = newId;
// if(firstset == s)
// firstset = newId;
// }
// }
// else/*second or later occurence of s for row j*/
// {
// k = var[s];
// svar[acol] = svar[k];
// /*remove acol from its current chain*/
// if(tailset[s] == acol) tailset[s] = prev[acol];
// next[prev[acol]] = next[acol];
// prev[next[acol]] = prev[acol];
// sz[s] = sz[s] - 1;
// if(sz[s] == 0)/*s is a free id now..*/
// {
// freeIdList[--freeIdTop] = s; /*add s to the free id list*/
// if(setnext[s])
// setprev[setnext[s]] = setprev[s];
// if(setprev[s])
// setnext[setprev[s]] = setnext[s];
// setprev[s] = setnext[s] = 0;
// tailset[s] = 0;
// var[s] = 0;
// flag[s] = 0;
// }
// /*add to chain containing k (as the last element)*/
// prev[acol] = tailset[svar[k]];
// next[acol] = 0;/*BU next[tailset[svar[k]]];*/
// next[tailset[svar[k]]] = acol;
// tailset[svar[k]] = acol;
// sz[svar[k]] = sz[svar[k]] + 1;
// }
// }
// }
// pos = 1;
// numBlocks = 0;
// for(set = firstset; set != 0; set = setnext[set])
// {
// ptiIndex item = tailset[set];
// ptiIndex headset = 0;
// numBlocks ++;
// while(item != 0 )
// {
// headset = item;
// item = prev[item];
// }
// /*located the head of the set. output them (this is for keeping the initial order*/
// while(headset)
// {
// cprm[pos++] = headset;
// headset = next[headset];
// }
// }
// free(tailset);
// free(sz);
// free(next);
// free(prev);
// free(var);
// free(flag);
// free(svar);
// free(setnext);
// free(setprev);
// if(pos-1 != n){
// printf("**************** Error ***********\n");
// printf("something went wrong and we could not order everyone\n");
// exit(12);
// }
// return ;
// }
/**************************************************************/
#define myAbs(x) (((x) < 0) ? -(x) : (x))
/* Reorder the indices of one mode of tsr by lexicographic ordering.
 * The tensor is sorted on all modes except `mode` (strategy chosen by
 * impl_num, which must be 1, 2, or 3 — the same assumption the sort stage
 * already makes), matricized as (other modes) x (this mode), and the columns
 * of that matrix are lex-ordered with lexOrderThem.  orgIds[mode] is updated
 * to track the original index of each renumbered position, and the mode's
 * index array in tsr is rewritten in place.
 *
 * Fixes vs. previous version: cmp_res is initialized (it was read
 * uninitialized — undefined behavior — if impl_num was out of range),
 * realloc no longer overwrites the live pointer, and all allocations are
 * checked. */
static void ptiLexiOrderPerMode(ptiSparseTensor * tsr, ptiIndex mode, ptiIndex ** orgIds, ptiElementIndex sb_bits, int tk, int impl_num)
{
    ptiIndexVector * inds = tsr->inds;
    ptiNnzIndex const nnz = tsr->nnz;
    ptiIndex const nmodes = tsr->nmodes;
    ptiIndex * ndims = tsr->ndims;
    ptiIndex const mode_dim = ndims[mode];
    ptiNnzIndex * rowPtrs = NULL;
    ptiIndex * colIds = NULL;
    ptiIndex * cprm = NULL, * invcprm = NULL, * saveOrgIds = NULL;
    ptiNnzIndex atRowPlus1, mtxNrows, mtrxNnz;
    ptiIndex * mode_order = (ptiIndex *) malloc (sizeof(ptiIndex) * (nmodes - 1));
    ptiIndex c;
    ptiNnzIndex z;
    double t1, t0;

    if (mode_order == NULL)
    {
        printf("could not allocate.exiting \n");
        exit(12);
    }

    /* list every mode except `mode`, in order; this drives the sort and the
     * row-comparison below */
    t0 = u_seconds();
    ptiIndex i = 0;
    for(ptiIndex m = 0; m < nmodes; ++m) {
        if (m != mode) {
            mode_order[i] = m;
            ++ i;
        }
    }
    if (impl_num == 1) {
        ptiSparseTensorSortIndexExceptSingleMode(tsr, 1, mode_order, tk);
    } else if (impl_num == 2) {
        ptiSparseTensorSortIndexExceptSingleModeRowBlock(tsr, 1, 0, tsr->nnz, mode_order, sb_bits, tk);
    } else if (impl_num == 3) { // Not work
        ptiSparseTensorSortIndexExceptSingleModeMorton(tsr, 1, mode_order, sb_bits, tk);
    }
    t1 = u_seconds()-t0;
    printf("mode %u, sort time %.2f\n", mode, t1);

    /* we matricize this (others x thisDim), whose columns will be renumbered */
    /* on the matrix all arrays are from 1, and all indices are from 1. */
    rowPtrs = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * (nnz + 2)); /*large space*/
    colIds = (ptiIndex *) malloc(sizeof(ptiIndex) * (nnz + 2)); /*large space*/
    if(rowPtrs == NULL || colIds == NULL)
    {
        printf("could not allocate.exiting \n");
        exit(12);
    }
    rowPtrs[0] = 0; /* we should not access this, that is why. */
    rowPtrs [1] = 1;
    colIds[1] = inds[mode].data[0] + 1;
    atRowPlus1 = 2;
    mtrxNnz = 2;/* start filling from the second element */

    /* a new matrix row starts wherever two consecutive (sorted) nonzeros
     * differ in any mode other than `mode` */
    t0 = u_seconds();
    for (z = 1; z < nnz; z++)
    {
        /* initialized: impl_num must be 1..3; a default of 0 keeps the
         * nonzero in the current row rather than reading garbage */
        int cmp_res = 0;
        if (impl_num == 1) {
            cmp_res = pti_SparseTensorCompareIndicesExceptSingleMode(tsr, z, tsr, z-1, mode_order);
        } else if (impl_num == 2) {
            cmp_res = pti_SparseTensorCompareIndicesExceptSingleModeRowBlock(tsr, z, tsr, z-1, mode_order, sb_bits);
        } else if (impl_num == 3) {
            cmp_res = pti_SparseTensorCompareIndicesMorton2D(tsr, z, tsr, z-1, mode_order, sb_bits);
        }
        if(cmp_res != 0)
            rowPtrs[atRowPlus1++] = mtrxNnz; /* close the previous row and start a new one. */
        colIds[mtrxNnz ++] = inds[mode].data[z] + 1;
    }
    rowPtrs[atRowPlus1] = mtrxNnz;
    mtxNrows = atRowPlus1-1;
    t1 =u_seconds()-t0;
    printf("mode %u, create time %.2f\n", mode, t1);

    /* shrink rowPtrs to the actual row count; on (unlikely) failure the
     * original, larger buffer is still valid, so keep using it */
    ptiNnzIndex * rowPtrsShrunk = realloc(rowPtrs, (sizeof(ptiNnzIndex) * (mtxNrows + 2)));
    if (rowPtrsShrunk != NULL)
        rowPtrs = rowPtrsShrunk;

    cprm = (ptiIndex *) malloc(sizeof(ptiIndex) * (mode_dim + 1));
    invcprm = (ptiIndex *) malloc(sizeof(ptiIndex) * (mode_dim + 1));
    saveOrgIds = (ptiIndex *) malloc(sizeof(ptiIndex) * (mode_dim + 1));
    if(cprm == NULL || invcprm == NULL || saveOrgIds == NULL)
    {
        printf("could not allocate.exiting \n");
        exit(12);
    }

    t0 = u_seconds();
    lexOrderThem(mtxNrows, mode_dim, rowPtrs, colIds, cprm, tk);
    t1 =u_seconds()-t0;
    printf("mode %u, lexorder time %.2f\n", mode, t1);

    /* update orgIds (permute by cprm) and build the inverse permutation;
     * cprm is 1-based, everything else is 0-based */
    for (c=0; c < mode_dim; c++)
    {
        invcprm[cprm[c+1]-1] = c;
        saveOrgIds[c] = orgIds[mode][c];
    }
    for (c=0; c < mode_dim; c++)
        orgIds[mode][c] = saveOrgIds[cprm[c+1]-1];

    /* rename the dim component of nonzeros */
    for (z = 0; z < nnz; z++)
        inds[mode].data[z] = invcprm[inds[mode].data[z]];

    free(saveOrgIds);
    free(invcprm);
    free(cprm);
    free(colIds);
    free(rowPtrs);
    free(mode_order);
}
/**************************************************************/
/* Hypergraph of a tensor in dual CSR form: one vertex per (mode, index) pair,
   one hyperedge per nonzero.  Both incidence directions are stored. */
typedef struct{
    ptiIndex nvrt;   /* number of vertices. This nvrt = n_0 + n_1 + ... + n_{d-1} for a d-dimensional tensor
                        where the ith dimension is of size n_i.*/
    ptiNnzIndex *vptrs, *vHids; /*starts of hedges containing vertices, and the ids of the hedges*/
    ptiNnzIndex nhdg;           /*this will be equal to the number of nonzeros in the tensor*/
    ptiNnzIndex *hptrs, *hVids; /*starts of vertices in the hedges, and the ids of the vertices*/
} basicHypergraph;
/*
 * Allocate the dual-CSR arrays of `hg` for nvrt vertices, nhdg hyperedges and
 * npins total (vertex, hyperedge) incidences.  The arrays are left
 * uninitialized; setVList/fillHypergraphFromCoo populate them.
 * Exits on allocation failure, consistent with the rest of this file.
 */
static void allocateHypergraphData(basicHypergraph *hg, ptiIndex nvrt, ptiNnzIndex nhdg, ptiNnzIndex npins)
{
    hg->nvrt = nvrt;
    hg->vptrs = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * (nvrt+1));
    hg->vHids = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * npins);
    hg->nhdg = nhdg;
    hg->hptrs = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * (nhdg+1));
    hg->hVids = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * npins);
    /* BUG FIX: these allocations were previously unchecked; a failure would
       surface later as a NULL dereference far from the cause. */
    if (hg->vptrs == NULL || hg->vHids == NULL || hg->hptrs == NULL || hg->hVids == NULL)
    {
        printf("could not allocate.exiting \n");
        exit(12);
    }
}
/*
 * Release all arrays of `hg` and reset it to an empty state.
 * free(NULL) is a no-op, so the old `if (ptr)` guards were redundant;
 * pointers are additionally reset to NULL so an accidental second call or a
 * stale use after freeing is harmless/detectable.
 */
static void freeHypergraphData(basicHypergraph *hg)
{
    hg->nvrt = 0;
    free(hg->vptrs); hg->vptrs = NULL;
    free(hg->vHids); hg->vHids = NULL;
    hg->nhdg = 0;
    free(hg->hptrs); hg->hptrs = NULL;
    free(hg->hVids); hg->hVids = NULL;
}
/*
 * Build the vertex-side view (vptrs/vHids) of the hypergraph from its
 * hyperedge-side view (hptrs/hVids) — a CSR "transpose" done with a counting
 * pass, a prefix sum, and a backward fill that decrements vptrs back to the
 * row starts.
 */
static void setVList(basicHypergraph *hg)
{
    /*PRE: We assume hg->hptrs and hg->hVids are set; hg->nvrts is set, and
     hg->vptrs and hg->vHids are allocated appropriately.
     */
    ptiNnzIndex j, h, v, nhdg = hg->nhdg;
    ptiIndex nvrt = hg->nvrt;
    /*vertices */
    ptiNnzIndex *vptrs = hg->vptrs, *vHids = hg->vHids;
    /*hyperedges*/
    ptiNnzIndex *hptrs = hg->hptrs, *hVids = hg->hVids;
    /* Pass 1: count, per vertex, how many hyperedges contain it. */
    for (v = 0; v <= nvrt; v++)
        vptrs[v] = 0;
    for (h = 0; h < nhdg; h++)
    {
        for (j = hptrs[h]; j < hptrs[h+1]; j++)
        {
            v = hVids[j];
            vptrs[v] ++;
        }
    }
    /* Pass 2: inclusive prefix sum turns counts into end offsets. */
    for (v=1; v <= nvrt; v++)
        vptrs[v] += vptrs[v-1];
    /* Pass 3: fill backward; the pre-decrement walks each vptrs[v] down from
       its end offset to its start offset, so vptrs ends up as row starts. */
    for (h = nhdg; h >= 1; h--)
    {
        for (j = hptrs[h-1]; j < hptrs[h]; j++)
        {
            v = hVids[j];
            vHids[--(vptrs[v])] = h-1;
        }
    }
}
/*
 * Populate `hg` from an nm-mode COO tensor with nnz nonzeros: one vertex per
 * (mode, index) pair — the vertices of mode i are shifted by the prefix sum
 * of the preceding dimension sizes — and one hyperedge per nonzero holding
 * that nonzero's nm vertices.  Finishes by deriving the vertex-side view.
 */
static void fillHypergraphFromCoo(basicHypergraph *hg, ptiIndex nm, ptiNnzIndex nnz, ptiIndex *ndims, ptiIndexVector * inds)
{
    ptiIndex totalSizes;
    ptiNnzIndex h, toAddress;
    ptiIndex *dimSizesPrefixSum;
    ptiIndex i;
    /* dimSizesPrefixSum[i] = n_0 + ... + n_{i-1}: offset of mode i's vertices. */
    dimSizesPrefixSum = (ptiIndex *) malloc(sizeof(ptiIndex) * (nm+1));
    totalSizes = 0;
    for (i=0; i < nm; i++)
    {
        dimSizesPrefixSum[i] = totalSizes;
        totalSizes += ndims[i];
    }
    /* NOTE(review): "%lu" assumes ptiNnzIndex is unsigned long — confirm the typedef. */
    printf("allocating hyp %u %lu\n", nm, nnz);
    /* Every hyperedge has exactly nm pins, hence nnz * nm pins in total. */
    allocateHypergraphData(hg, totalSizes, nnz, nnz * nm);
    toAddress = 0;
    for (h = 0; h < nnz; h++)
    {
        hg->hptrs[h] = toAddress;
        for (i = 0; i < nm; i++)
            hg->hVids[toAddress + i] = dimSizesPrefixSum[i] + inds[i].data[h];
        toAddress += nm;
    }
    hg->hptrs[hg->nhdg] = toAddress; /* sentinel: end of the last hyperedge */
    setVList(hg);
    free(dimSizesPrefixSum);
}
/*
 * Scan the sz entries of lst for a vertex id falling in [indStart, indEnd]
 * and return it.  Aborts the program if no such entry exists (every
 * hyperedge is expected to contain exactly one vertex per dimension).
 */
static inline ptiIndex locateVertex(ptiNnzIndex indStart, ptiNnzIndex indEnd, ptiNnzIndex *lst, ptiNnzIndex sz)
{
    for (ptiNnzIndex pos = 0; pos < sz; pos++)
    {
        ptiNnzIndex cand = lst[pos];
        if (cand >= indStart && cand <= indEnd)
            return cand;
    }
    printf("could not locate in a hyperedge !!!\n");
    exit(1);
    return sz+1; /* unreachable; keeps compilers happy about the return */
}
/* SIZEV(vid): degree of vertex vid — the number of hyperedges containing it. */
#define SIZEV( vid ) vptrs[(vid)+1]-vptrs[(vid)]
/*
 * Raise the key of vertex `id` to `newKey` and sift it up to restore the
 * max-heap property.  The heap is 1-based: heapIds[1..sz] holds vertex ids
 * and inheap[v] is v's position in heapIds (0 means "not in the heap").
 * Ties on key are broken toward the vertex with the LARGER degree.
 * Silently does nothing if `id` is not currently in the heap.
 */
static void heapIncreaseKey(ptiIndex *heapIds, ptiNnzIndex *key, ptiNnzIndex *vptrs, ptiIndex sz, ptiIndex id, ptiIndex *inheap, ptiNnzIndex newKey)
{
    ptiIndex i = inheap[id]; /*location in heap*/
    if( i > 0 && i <=sz )
    {
        key[id] = newKey;
        /* Sift up while the parent (at i>>1) loses on key, or ties on key
           with a smaller degree. */
        while ((i>>1)>0 && ( (key[id] > key[heapIds[i>>1]]) ||
                             (key[id] == key[heapIds[i>>1]] && SIZEV(id) > SIZEV(heapIds[i>>1])))
              )
        {
            heapIds[i] = heapIds[i>>1];
            inheap[heapIds[i]] = i;
            i = i>>1;
        }
        heapIds[i] = id;
        inheap[id] = i;
    }
}
/*
 * Sift heapIds[i] down to restore the max-heap property below position i.
 * Same 1-based layout as heapIncreaseKey; inheap is kept in sync with every
 * swap.
 *
 * NOTE(review): the key tie-break here prefers the child with the SMALLER
 * degree (SIZEV(...) < SIZEV(...)), while heapIncreaseKey sifts UP on a
 * LARGER degree — the two orderings are inconsistent.  Left as-is because
 * changing either silently alters the produced ordering; confirm which
 * direction is intended.
 */
static void heapify(ptiIndex *heapIds, ptiNnzIndex *key, ptiNnzIndex *vptrs, ptiIndex sz, ptiIndex i, ptiIndex *inheap)
{
    ptiIndex largest, j, l,r, tmp;
    largest = j = i;
    while(j<=sz/2) /* j has at least a left child */
    {
        l = 2*j;
        r = 2*j + 1;
        /* pick the "largest" among j and its children per (key, SIZEV) order */
        if ( (key[heapIds[l]] > key[heapIds[j]] ) ||
             (key[heapIds[l]] == key[heapIds[j]] && SIZEV(heapIds[l]) < SIZEV(heapIds[j]) )
           )
            largest = l;
        else
            largest = j;
        if (r<=sz && (key[heapIds[r]]>key[heapIds[largest]] ||
                      (key[heapIds[r]]==key[heapIds[largest]] && SIZEV(heapIds[r]) < SIZEV(heapIds[largest])))
           )
            largest = r;
        if (largest != j)
        {
            /* swap positions j and largest, keeping inheap consistent */
            tmp = heapIds[largest];
            heapIds[largest] = heapIds[j];
            inheap[heapIds[j]] = largest;
            heapIds[j] = tmp;
            inheap[heapIds[j]] = j;
            j = largest;
        }
        else
            break;
    }
}
/*
 * Pop and return the root (maximum-key vertex) of the 1-based heap.
 * Shrinks *sz by one, marks the popped vertex as out of the heap
 * (inheap[v] == 0), and restores the heap property.  Aborts on underflow.
 */
static ptiIndex heapExtractMax(ptiIndex *heapIds, ptiNnzIndex *key, ptiNnzIndex *vptrs, ptiIndex *sz, ptiIndex *inheap)
{
    if (*sz < 1)
    {
        printf("Error: heap underflow\n");
        exit(12);
    }
    ptiIndex const top = heapIds[1];
    /* move the last element to the root, shrink, then sift it down */
    heapIds[1] = heapIds[*sz];
    inheap[heapIds[1]] = 1;
    *sz -= 1;
    inheap[top] = 0; /* 0 == not in the heap */
    heapify(heapIds, key, vptrs, *sz, 1, inheap);
    return top;
}
/* Turn heapIds[1..sz] into a max-heap by sifting down every internal node,
   from the last parent up to the root. */
static void heapBuild(ptiIndex *heapIds, ptiNnzIndex *key, ptiNnzIndex *vptrs, ptiIndex sz, ptiIndex *inheap)
{
    for (ptiIndex node = sz/2; node >= 1; node--)
        heapify(heapIds, key, vptrs, sz, node, inheap);
}
/*
 * Greedily order the vertices with ids in [indStart, indEnd] (one tensor
 * dimension): repeatedly extract the heap vertex most connected to the
 * already-ordered ones, assign it the next position in newIndicesHg, and bump
 * the keys of the same-dimension vertices reachable through its hyperedges.
 * Akin to maximum cardinality search / Cuthill-McKee.
 */
static void orderforHiCOOaDim(basicHypergraph *hg, ptiIndex *newIndicesHg, ptiIndex indStart, ptiIndex indEnd)
{
    /* we re-order the vertices of the hypergraph with ids in the range [indStart, indEnd]*/
    ptiIndex i, v, heapSz, *inHeap, *heapIds;
    ptiNnzIndex j, jj, hedge, hedge2, k, w, ww;
    ptiNnzIndex *vptrs = hg->vptrs, *vHids = hg->vHids, *hptrs = hg->hptrs, *hVids = hg->hVids;
    ptiNnzIndex *keyvals, newKeyval;
    int *markers, mark;
    mark = 0;
    heapIds = (ptiIndex*) malloc(sizeof(ptiIndex) * (indEnd-indStart + 2));
    inHeap = (ptiIndex*) malloc(sizeof(ptiIndex) * hg->nvrt);/*this is large*/
    keyvals = (ptiNnzIndex *) malloc(sizeof(ptiNnzIndex) * hg->nvrt);
    markers = (int*) malloc(sizeof(int)* hg->nvrt);
    /* BUG FIX: markers[w] is read below for vertices of ALL dimensions (w
       ranges over whole hyperedges), so every entry must be initialized.
       Previously only [indStart, indEnd] was set, leaving the remaining reads
       on uninitialized memory (undefined behavior, and any stale 0 would
       wrongly skip a neighbor's key update). */
    for (j = 0; j < hg->nvrt; j++)
        markers[j] = -1;
    heapSz = 0;
    /* seed the heap: all in-range vertices, key 0 (no ordered neighbor yet) */
    for (i = indStart; i<=indEnd; i++)
    {
        keyvals[i] = 0;
        heapIds[++heapSz] = i;
        inHeap[i] = heapSz;
    }
    heapBuild(heapIds, keyvals, vptrs, heapSz, inHeap);
    for (i = indStart; i <= indEnd; i++)
    {
        v = heapExtractMax(heapIds, keyvals, vptrs, &heapSz, inHeap);
        newIndicesHg[v] = i;
        markers[v] = mark;
        /* for each hyperedge containing v ... */
        for (j = vptrs[v]; j < vptrs[v+1]; j++)
        {
            hedge = vHids[j];
            /* ... visit every vertex w of that hyperedge once per extraction */
            for (k = hptrs[hedge]; k < hptrs[hedge+1]; k++)
            {
                w = hVids[k];
                if (markers[w] != mark)
                {
                    markers[w] = mark;
                    /* bump the in-range representative of each hyperedge of w */
                    for(jj = vptrs[w]; jj < vptrs[w+1]; jj++)
                    {
                        hedge2 = vHids[jj];
                        ww = locateVertex(indStart, indEnd, hVids + hptrs[hedge2], hptrs[hedge2+1]-hptrs[hedge2]);
                        if( inHeap[ww] )
                        {
                            newKeyval = keyvals[ww] + 1;
                            heapIncreaseKey(heapIds, keyvals, vptrs, heapSz, ww, inHeap, newKeyval);
                        }
                    }
                }
            }
        }
    }
    free(markers);
    free(keyvals);
    free(inHeap);
    free(heapIds);
}
/**************************************************************/
void ptiBFSLike(ptiSparseTensor * tsr, ptiIndex ** newIndices)
{
    /*PRE: newIndices is allocated
     POST:
     newIndices[0][0...n_0-1] gives the new ids for dim 0
     newIndices[1][0...n_1-1] gives the new ids for dim 1
     ...
     newIndices[d-1][0...n_{d-1}-1] gives the new ids for dim d-1
     This implements a simple idea close to BFS/Cuthill-McKee/Maximum cardinality search.
     */
    ptiIndex const nmodes = tsr->nmodes;
    ptiNnzIndex const nnz = tsr->nnz;
    ptiIndex * ndims = tsr->ndims;
    ptiIndexVector * inds = tsr->inds;
    ptiIndex *dimsPrefixSum;
    basicHypergraph hg;
    ptiIndex *newIndicesHg;
    ptiIndex d, i;
    /* dimsPrefixSum[d]: offset of mode d's vertices in the flat vertex space */
    dimsPrefixSum = (ptiIndex*) calloc(nmodes, sizeof(ptiIndex));
    for (d = 1; d < nmodes; d++)
        dimsPrefixSum[d] = ndims[d-1] + dimsPrefixSum[d-1];
    /* one vertex per (mode, index) pair, one hyperedge per nonzero */
    fillHypergraphFromCoo(&hg, nmodes, nnz, ndims, inds);
    newIndicesHg = (ptiIndex*) malloc(sizeof(ptiIndex) * hg.nvrt);
    for (i = 0; i < hg.nvrt; i++)
        newIndicesHg[i] = i; /* identity ordering as the default */
    for (d = 0; d < nmodes; d++) /*order d*/
        orderforHiCOOaDim(&hg, newIndicesHg, dimsPrefixSum[d], dimsPrefixSum[d] + ndims[d]-1);
    /*copy from newIndices to newIndicesOut*/
    /* translate flat vertex ids back into per-mode 0-based ids */
    for (d = 0; d < nmodes; d++)
        for (i = 0; i < ndims[d]; i++)
            newIndices[d][i] = newIndicesHg[dimsPrefixSum[d] + i] - dimsPrefixSum[d];
    free(newIndicesHg);
    freeHypergraphData(&hg);
    free(dimsPrefixSum);
}
/********************** Internals end *************************/
|
kernels_compact_storage.h | #ifndef _KERNELS_COMPACT_STORAGE_H_
#define _KERNELS_COMPACT_STORAGE_H_
/*
 * COMPUTE_DIAMETER_WITH_POINT(cand, cached_dist, i)
 *
 * For candidate point `cand` (the i-th candidate handled by this thread),
 * compute the diameter the current cluster would have if `cand` were added,
 * and fold the result into this thread's running minimum (min_dist,
 * point_index).
 *
 * Relies on these names in the calling scope: tid, curThreadCount,
 * seed_point, dist_to_clust, threshold, Ai_mask, clustered_pnts_mask,
 * indr_mtrx, compact_storage_dist_matrix, latest_p_off, last_index_checked,
 * dist_to_new_point, diameter, min_dist, point_index, max_degree.
 *
 * The control flow is unusual BY DESIGN: the leading bare `break` sits
 * OUTSIDE the do/while(0), so it exits the CALLER's surrounding loop as soon
 * as a negative (absent) candidate is seen; the `break`s inside the
 * do/while(0) only skip the remainder of this macro's own work.
 */
#define COMPUTE_DIAMETER_WITH_POINT( _CAND_PNT_, _CURR_DIST_TO_CLUST_, _I_ ) \
if( (_CAND_PNT_) < 0 ){\
break;\
}\
do{\
int tmp_index = (_I_)*curThreadCount+tid;\
if( (_CAND_PNT_) == seed_point ){\
break;\
}\
_CURR_DIST_TO_CLUST_ = dist_to_clust[ tmp_index ];\
/* if "_CAND_PNT_" is too far away, or already in Ai_mask, or in clustered_points, ignore it. */\
if( (_CURR_DIST_TO_CLUST_ > threshold) || (0 != Ai_mask[(_CAND_PNT_)]) || (0 != clustered_pnts_mask[(_CAND_PNT_)]) ){ \
_CAND_PNT_ = seed_point; /* This is so we don't do the lookup again. */\
break;\
}\
dist_to_new_point = threshold+1;\
/* Find _CAND_PNT_ in the neighborhood of the latest_point.*/\
for(int j=last_index_checked; j<max_degree; j++){\
int tmp_pnt = indr_mtrx[ latest_p_off + j ];\
if( (tmp_pnt > (_CAND_PNT_)) || (tmp_pnt < 0) ){\
last_index_checked = j;\
break;\
}\
if( tmp_pnt == (_CAND_PNT_) ){\
dist_to_new_point = compact_storage_dist_matrix[ latest_p_off + j ];\
break;\
}\
}\
\
/* See if the distance of "_CAND_PNT_" to the "latest_point" is larger */\
/* than the previous, cached distance of "_CAND_PNT_" to the cluster. */\
if(dist_to_new_point > _CURR_DIST_TO_CLUST_){\
diameter = dist_to_new_point;\
dist_to_clust[ tmp_index ] = diameter;\
}else{\
diameter = _CURR_DIST_TO_CLUST_;\
}\
\
/* The point that leads to the cluster with the smallest diameter is the closest point */\
if( diameter < min_dist ){\
min_dist = diameter;\
point_index = (_CAND_PNT_);\
}\
}while(0)
/*
 * FETCH_POINT(cand, i)
 * Load the i-th candidate assigned to this thread from the seed point's
 * neighbor list into `cand`.  The bare braces (instead of do/while(0)) are
 * deliberate: the `break`s bind to the CALLER's enclosing loop, terminating
 * it once this thread's slot runs past max_degree or hits a negative
 * (absent) neighbor.  Relies on curThreadCount, tid, max_degree, indr_mtrx
 * and seed_p_off from the calling scope.
 */
#define FETCH_POINT( _CAND_PNT_ , _I_ )\
{\
int tmp_index = (_I_)*curThreadCount+tid;\
if( tmp_index >= max_degree ){\
break;\
}\
_CAND_PNT_ = indr_mtrx[ seed_p_off + tmp_index ];\
if( (_CAND_PNT_) < 0 ){\
break;\
}\
}
#pragma omp declare target
/*
 * Cooperatively (all threads of the enclosing OpenMP team) grow a candidate
 * cluster around `seed_point`: starting from the seed, repeatedly add the
 * neighbor that keeps the cluster diameter smallest, until no point within
 * `threshold` remains or `point_count` points were added.
 *
 * Every thread of the team must call this together — the barriers below are
 * the synchronization contract.  Per-thread work is strided by
 * (thread id, team size); dist_array/point_index_array are team-wide scratch
 * of at least curThreadCount entries each.
 *
 * Returns the number of points placed in the cluster.  If candidate_cluster
 * is non-NULL, the chosen point ids are written there; Ai_mask marks
 * membership either way.
 */
int generate_candidate_cluster_compact_storage(
float* dist_array,
int* point_index_array,
const int seed_point,
const int degree,
char *Ai_mask,
float *compact_storage_dist_matrix,
char *clustered_pnts_mask,
int *indr_mtrx,
float *dist_to_clust,
const int point_count,
const int N0,
const int max_degree,
int *candidate_cluster,
const float threshold)
{
bool flag;
int cnt, latest_point;
int tid = omp_get_thread_num();
int curThreadCount = omp_get_num_threads();
int seed_p_off;
float curr_dist_to_clust_i;
/* one scalar per prefetched candidate (see the FETCH_POINT block below) */
float curr_dist_to_clust_0, curr_dist_to_clust_1, curr_dist_to_clust_2, curr_dist_to_clust_3;
float curr_dist_to_clust_4, curr_dist_to_clust_5, curr_dist_to_clust_6, curr_dist_to_clust_7;
float curr_dist_to_clust_8, curr_dist_to_clust_9, curr_dist_to_clust_10, curr_dist_to_clust_11;
int cand_pnt_i=-1;
int cand_pnt_0=-1, cand_pnt_1=-1, cand_pnt_2=-1, cand_pnt_3=-1;
int cand_pnt_4=-1, cand_pnt_5=-1, cand_pnt_6=-1, cand_pnt_7=-1;
int cand_pnt_8=-1, cand_pnt_9=-1, cand_pnt_10=-1, cand_pnt_11=-1;
// Cleanup the candidate-cluster-mask, Ai_mask
for(int i=0; i+tid < N0; i+=curThreadCount){
Ai_mask[i+tid] = 0;
}
// Cleanup the "distance cache"
for(int i=0; i+tid < max_degree; i+=curThreadCount){
dist_to_clust[i+tid] = 0;
}
// Put the seed point in the candidate cluster and mark it as taken in the candidate cluster mask Ai_mask.
flag = true;
cnt = 1;
if( 0 == tid ){
if( NULL != candidate_cluster )
candidate_cluster[0] = seed_point;
Ai_mask[seed_point] = 1;
}
#pragma omp barrier
seed_p_off = seed_point*max_degree;
latest_point = seed_point;
// Prefetch 12 points per thread, into registers, to reduce the memory pressure (and delay) of
// constantly going to memory to fetch these points inside the while() loop that follows.
// (FETCH_POINT's internal `break` exits this do/while once a thread runs out of candidates.)
do{
FETCH_POINT( cand_pnt_0, 0 );
FETCH_POINT( cand_pnt_1, 1 );
FETCH_POINT( cand_pnt_2, 2 );
FETCH_POINT( cand_pnt_3, 3 );
FETCH_POINT( cand_pnt_4, 4 );
FETCH_POINT( cand_pnt_5, 5 );
FETCH_POINT( cand_pnt_6, 6 );
FETCH_POINT( cand_pnt_7, 7 );
FETCH_POINT( cand_pnt_8, 8 );
FETCH_POINT( cand_pnt_9, 9 );
FETCH_POINT( cand_pnt_10, 10 );
FETCH_POINT( cand_pnt_11, 11 );
}while(0);
// different threads might exit this loop at different times, so let them catch up.
#pragma omp barrier
while( (cnt < point_count) && flag ){
int point_index = -1;
float min_dist=3*threshold; // sentinel: strictly above any acceptable diameter
int last_index_checked = 0;
float diameter;
float dist_to_new_point;
int latest_p_off = latest_point*max_degree;
// Evaluate the 12 prefetched candidates; COMPUTE_DIAMETER_WITH_POINT's
// leading `break` exits this do/while at the first absent candidate.
do{
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_0, curr_dist_to_clust_0, 0 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_1, curr_dist_to_clust_1, 1 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_2, curr_dist_to_clust_2, 2 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_3, curr_dist_to_clust_3, 3 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_4, curr_dist_to_clust_4, 4 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_5, curr_dist_to_clust_5, 5 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_6, curr_dist_to_clust_6, 6 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_7, curr_dist_to_clust_7, 7 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_8, curr_dist_to_clust_8, 8 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_9, curr_dist_to_clust_9, 9 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_10, curr_dist_to_clust_10, 10 );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_11, curr_dist_to_clust_11, 11 );
}while(0);
// different threads might exit this loop at different times, so let them catch up.
#pragma omp barrier
// The following loop implements the "find point pj s.t. diameter(Ai && pj) is minimum"
for(int i=12; i*curThreadCount+tid < max_degree; i++){
FETCH_POINT( cand_pnt_i, i );
COMPUTE_DIAMETER_WITH_POINT( cand_pnt_i, curr_dist_to_clust_i, i );
}
#pragma omp barrier
//min_G_index = closest_point_reduction(min_dist, threshold, point_index);
// Publish each thread's local best, then let thread 0 reduce across the team.
dist_array[tid] = min_dist;
point_index_array[tid] = point_index;
#pragma omp barrier
if(tid == 0 ){
for(int j=1; j<curThreadCount; j++){
float dist = dist_array[j];
// look for a point that is closer, or equally far, but with a smaller index.
if( (dist < min_dist) || (dist == min_dist && point_index_array[j] < point_index_array[0]) ){
min_dist = dist;
point_index_array[0] = point_index_array[j];
}
}
if( min_dist > threshold )
point_index_array[0] = -1; // no acceptable point: terminate growth
}
#pragma omp barrier
int min_G_index = point_index_array[0];
if(min_G_index >= 0 ){
if( 0 == tid ){
Ai_mask[min_G_index] = 1;
if( NULL != candidate_cluster ){
candidate_cluster[cnt] = min_G_index;
}
}
latest_point = min_G_index;
cnt++;
}else{
flag = false;
}
#pragma omp barrier
}
#pragma omp barrier
return cnt;
}
#pragma omp end declare target
#endif
|
misc.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
void zero_grid(domain_type * domain, int level, int grid_id){
  // Set every value of grid 'grid_id' on 'level' to zero, ghost zones included.
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Small boxes: one thread per box.  Large boxes: the team cooperates inside each box.
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so grid[0] is the first non-ghost point; negative offsets reach the ghost region.
    double * __restrict__ grid = domain->subdomains[b].levels[level].grids[grid_id] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2)
    for(kk=-ghosts;kk<dim_k+ghosts;kk++){
    for(jj=-ghosts;jj<dim_j+ghosts;jj++){
    for(ii=-ghosts;ii<dim_i+ghosts;ii++){
      grid[ii + jj*pencil + kk*plane] = 0.0;
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void initialize_grid_to_scalar(domain_type * domain, int level, int grid_id, double scalar){
  // Set interior cells of grid 'grid_id' to 'scalar' and all ghost cells to zero.
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so grid[0] is the first non-ghost point.
    double * __restrict__ grid = domain->subdomains[b].levels[level].grids[grid_id] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2)
    for(kk=-ghosts;kk<dim_k+ghosts;kk++){
    for(jj=-ghosts;jj<dim_j+ghosts;jj++){
    for(ii=-ghosts;ii<dim_i+ghosts;ii++){
      // A cell is interior iff all three indices fall inside [0, dim).
      const int interior = (ii>=0) && (jj>=0) && (kk>=0) && (ii<dim_i) && (jj<dim_j) && (kk<dim_k);
      grid[ii + jj*pencil + kk*plane] = interior ? scalar : 0.0;
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void add_grids(domain_type * domain, int level, int id_c, double scale_a, int id_a, double scale_b, int id_b){ // c=scale_a*id_a + scale_b*id_b
  // Linear combination over interior cells only (ghosts untouched).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point of each grid.
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_b = domain->subdomains[b].levels[level].grids[id_b] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      const int ijk = ii + jj*pencil + kk*plane;
      grid_c[ijk] = scale_a*grid_a[ijk] + scale_b*grid_b[ijk];
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void mul_grids(domain_type * domain, int level, int id_c, double scale, int id_a, int id_b){ // id_c=scale*id_a*id_b
  // Pointwise product over interior cells only (ghosts untouched).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point of each grid.
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_b = domain->subdomains[b].levels[level].grids[id_b] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      const int ijk = ii + jj*pencil + kk*plane;
      grid_c[ijk] = scale*grid_a[ijk]*grid_b[ijk];
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void scale_grid(domain_type * domain, int level, int id_c, double scale_a, int id_a){ // c[]=scale_a*a[]
  // Scaled copy over interior cells only (ghosts untouched).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point of each grid.
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      const int ijk = ii + jj*pencil + kk*plane;
      grid_c[ijk] = scale_a*grid_a[ijk];
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
double dot(domain_type * domain, int level, int id_a, int id_b){
  // Global dot product <a,b> over interior cells (all-reduced across ranks under MPI).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  double a_dot_b_domain = 0.0;
  int b;
  // FIX, schedule(static) is a stand in to guarantee reproducibility...
  #pragma omp parallel for private(b) if(threadAcrossBoxes) reduction(+:a_dot_b_domain) schedule(static)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point of each grid.
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_b = domain->subdomains[b].levels[level].grids[id_b] + ghosts*(1+pencil+plane);
    double a_dot_b_box = 0.0;
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2) reduction(+:a_dot_b_box) schedule(static)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      const int ijk = ii + jj*pencil + kk*plane;
      a_dot_b_box += grid_a[ijk]*grid_b[ijk];
    }}}
    a_dot_b_domain+=a_dot_b_box;
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
  #ifdef __MPI
  uint64_t _timeStartAllReduce = CycleTime();
  double local = a_dot_b_domain;
  MPI_Allreduce(&local,&a_dot_b_domain,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level]   += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(a_dot_b_domain);
}
//------------------------------------------------------------------------------------------------------------------------------
double norm(domain_type * domain, int level, int grid_id){ // implements the max norm
  // Global max-norm (L-infinity) over interior cells (all-reduced across ranks under MPI).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  double max_norm = 0.0;
  int b;
  // FIX, schedule(static) is a stand in to guarantee reproducibility...
  #pragma omp parallel for private(b) if(threadAcrossBoxes) reduction(max:max_norm) schedule(static)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point.
    double * __restrict__ grid = domain->subdomains[b].levels[level].grids[ grid_id] + ghosts*(1+pencil+plane);
    double box_norm = 0.0;
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2) reduction(max:box_norm) schedule(static)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      const double mag = fabs(grid[ii + jj*pencil + kk*plane]);
      if(mag>box_norm){box_norm=mag;} // max norm
    }}}
    if(box_norm>max_norm){max_norm = box_norm;}
  } // box list
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
  #ifdef __MPI
  uint64_t _timeStartAllReduce = CycleTime();
  double local = max_norm;
  MPI_Allreduce(&local,&max_norm,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level]   += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(max_norm);
}
//------------------------------------------------------------------------------------------------------------------------------
double mean(domain_type * domain, int level, int id_a){
  // Global mean of grid id_a over ALL interior cells of the domain (MPI-summed).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  double sum_domain = 0.0;
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes) reduction(+:sum_domain)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point.
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    double sum_box = 0.0;
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2) reduction(+:sum_box)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      sum_box += grid_a[ii + jj*pencil + kk*plane];
    }}}
    sum_domain+=sum_box;
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
  // Cell count of the WHOLE domain, not just this rank's boxes.
  double ncells_domain = (double)domain->dim.i*(double)domain->dim.j*(double)domain->dim.k;
  #ifdef __MPI
  uint64_t _timeStartAllReduce = CycleTime();
  double local = sum_domain;
  MPI_Allreduce(&local,&sum_domain,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level]   += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  double mean_domain = sum_domain / ncells_domain;
  return(mean_domain);
}
void shift_grid(domain_type * domain, int level, int id_c, int id_a, double shift_a){
  // c[] = a[] + shift_a over interior cells only (ghosts untouched).
  uint64_t _timeStart = CycleTime();
  int collaborativeBoxSize = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborativeBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int threadAcrossBoxes = (domain->subdomains[0].levels[level].dim.i <  collaborativeBoxSize);
  int threadWithinABox  = (domain->subdomains[0].levels[level].dim.i >= collaborativeBoxSize);
  int b;
  #pragma omp parallel for private(b) if(threadAcrossBoxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    const int pencil = domain->subdomains[b].levels[level].pencil;
    const int plane  = domain->subdomains[b].levels[level].plane;
    const int ghosts = domain->subdomains[b].levels[level].ghosts;
    const int dim_k  = domain->subdomains[b].levels[level].dim.k;
    const int dim_j  = domain->subdomains[b].levels[level].dim.j;
    const int dim_i  = domain->subdomains[b].levels[level].dim.i;
    // Offset so [0] is the first non-ghost point of each grid.
    double * __restrict__ grid_c = domain->subdomains[b].levels[level].grids[id_c] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_a = domain->subdomains[b].levels[level].grids[id_a] + ghosts*(1+pencil+plane);
    int ii,jj,kk;
    #pragma omp parallel for private(kk,jj,ii) if(threadWithinABox) collapse(2)
    for(kk=0;kk<dim_k;kk++){
    for(jj=0;jj<dim_j;jj++){
    for(ii=0;ii<dim_i;ii++){
      const int ijk = ii + jj*pencil + kk*plane;
      grid_c[ijk] = grid_a[ijk] + shift_a;
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// Average the cell-centered grid id_cell onto the faces normal to direction dir
// (0 = i, 1 = j, 2 = k), writing into grid id_face.  Each face value is the simple
// linear interpolation (arithmetic mean) of the two adjacent cells; the loops run
// to <=dim so both the low and the high boundary faces are produced.
// Timing is accumulated into cycles.blas1[level].
void project_cell_to_face(domain_type * domain, int level, int id_cell, int id_face, int dir){
  uint64_t _timeStart = CycleTime();
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i <  CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;
    int plane  = domain->subdomains[box].levels[level].plane;
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int dim_k  = domain->subdomains[box].levels[level].dim.k;
    int dim_j  = domain->subdomains[box].levels[level].dim.j;
    int dim_i  = domain->subdomains[box].levels[level].dim.i;
    double * __restrict__ grid_cell = domain->subdomains[box].levels[level].grids[id_cell] + ghosts*(1+pencil+plane);
    double * __restrict__ grid_face = domain->subdomains[box].levels[level].grids[id_face] + ghosts*(1+pencil+plane);
    // BUG FIX: stride was previously uninitialized when dir was outside 0..2,
    // which is undefined behavior (and an arbitrary access pattern).  Default
    // to the i-direction stride; valid dirs behave exactly as before.
    int stride = 1;
    switch(dir){
      case 0: stride =      1;break;//i-direction
      case 1: stride = pencil;break;//j-direction
      case 2: stride =  plane;break;//k-direction
      default:stride =      1;break;//defensive: treat invalid dir as i-direction
    }
    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=0;k<=dim_k;k++){ // <= to ensure you do low and high faces
    for(j=0;j<=dim_j;j++){
    for(i=0;i<=dim_i;i++){
      int ijk = i + j*pencil + k*plane;
      grid_face[ijk] = 0.5*(grid_cell[ijk-stride] + grid_cell[ijk]); // simple linear interpolation
    }}}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);
}
|
x_solve.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// this function performs the solution of the approximate factorization
// step in the x-direction for all five matrix components
// simultaneously. The Thomas algorithm is employed to solve the
// systems for the x-lines. Boundary conditions are non-periodic
//---------------------------------------------------------------------
void x_solve()
{
  int i, j, k, i1, i2, m;
  double ru1, fac1, fac2;

  //---------------------------------------------------------------------
  // NOTE(review): lhs, lhsp, lhsm, cv and rhon are indexed without k yet
  // written inside this k-parallel loop -- presumably they are declared
  // threadprivate (or otherwise privatized per thread) in header.h;
  // verify against their declarations.
  //---------------------------------------------------------------------
  #pragma omp parallel for default(shared) private(i,j,k,i1,i2,m, \
      ru1,fac1,fac2)
  for (k = 1; k <= nz2; k++) {
    // reset the working LHS arrays for this k-plane
    lhsinit(nx2+1, ny2);

    //---------------------------------------------------------------------
    // Computes the left hand side for the three x-factors
    //---------------------------------------------------------------------

    //---------------------------------------------------------------------
    // first fill the lhs for the u-eigenvalue
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      for (i = 0; i <= grid_points[0]-1; i++) {
        ru1 = c3c4*rho_i[k][j][i];
        cv[i] = us[k][j][i];
        rhon[i] = max(max(dx2+con43*ru1,dx5+c1c5*ru1), max(dxmax+ru1,dx1));
      }

      // assemble the pentadiagonal rows: [0]..[4] are the five bands
      for (i = 1; i <= nx2; i++) {
        lhs[j][i][0] = 0.0;
        lhs[j][i][1] = -dttx2 * cv[i-1] - dttx1 * rhon[i-1];
        lhs[j][i][2] = 1.0 + c2dttx1 * rhon[i];
        lhs[j][i][3] = dttx2 * cv[i+1] - dttx1 * rhon[i+1];
        lhs[j][i][4] = 0.0;
      }
    }

    //---------------------------------------------------------------------
    // add fourth order dissipation
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      i = 1;
      lhs[j][i][2] = lhs[j][i][2] + comz5;
      lhs[j][i][3] = lhs[j][i][3] - comz4;
      lhs[j][i][4] = lhs[j][i][4] + comz1;

      lhs[j][i+1][1] = lhs[j][i+1][1] - comz4;
      lhs[j][i+1][2] = lhs[j][i+1][2] + comz6;
      lhs[j][i+1][3] = lhs[j][i+1][3] - comz4;
      lhs[j][i+1][4] = lhs[j][i+1][4] + comz1;
    }

    for (j = 1; j <= ny2; j++) {
      for (i = 3; i <= grid_points[0]-4; i++) {
        lhs[j][i][0] = lhs[j][i][0] + comz1;
        lhs[j][i][1] = lhs[j][i][1] - comz4;
        lhs[j][i][2] = lhs[j][i][2] + comz6;
        lhs[j][i][3] = lhs[j][i][3] - comz4;
        lhs[j][i][4] = lhs[j][i][4] + comz1;
      }
    }

    for (j = 1; j <= ny2; j++) {
      i = grid_points[0]-3;
      lhs[j][i][0] = lhs[j][i][0] + comz1;
      lhs[j][i][1] = lhs[j][i][1] - comz4;
      lhs[j][i][2] = lhs[j][i][2] + comz6;
      lhs[j][i][3] = lhs[j][i][3] - comz4;

      lhs[j][i+1][0] = lhs[j][i+1][0] + comz1;
      lhs[j][i+1][1] = lhs[j][i+1][1] - comz4;
      lhs[j][i+1][2] = lhs[j][i+1][2] + comz5;
    }

    //---------------------------------------------------------------------
    // subsequently, fill the other factors (u+c), (u-c) by adding to
    // the first
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      for (i = 1; i <= nx2; i++) {
        lhsp[j][i][0] = lhs[j][i][0];
        lhsp[j][i][1] = lhs[j][i][1] - dttx2 * speed[k][j][i-1];
        lhsp[j][i][2] = lhs[j][i][2];
        lhsp[j][i][3] = lhs[j][i][3] + dttx2 * speed[k][j][i+1];
        lhsp[j][i][4] = lhs[j][i][4];
        lhsm[j][i][0] = lhs[j][i][0];
        lhsm[j][i][1] = lhs[j][i][1] + dttx2 * speed[k][j][i-1];
        lhsm[j][i][2] = lhs[j][i][2];
        lhsm[j][i][3] = lhs[j][i][3] - dttx2 * speed[k][j][i+1];
        lhsm[j][i][4] = lhs[j][i][4];
      }
    }

    //---------------------------------------------------------------------
    // FORWARD ELIMINATION
    //---------------------------------------------------------------------

    //---------------------------------------------------------------------
    // perform the Thomas algorithm; first, FORWARD ELIMINATION
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      for (i = 0; i <= grid_points[0]-3; i++) {
        i1 = i + 1;
        i2 = i + 2;
        // normalize row i by its diagonal, then eliminate the sub-diagonal
        // entries of the next two rows (bands [1] of i1 and [0] of i2)
        fac1 = 1.0/lhs[j][i][2];
        lhs[j][i][3] = fac1*lhs[j][i][3];
        lhs[j][i][4] = fac1*lhs[j][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        }
        lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1]*lhs[j][i][3];
        lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1]*lhs[j][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1]*rhs[k][j][i][m];
        }
        lhs[j][i2][1] = lhs[j][i2][1] - lhs[j][i2][0]*lhs[j][i][3];
        lhs[j][i2][2] = lhs[j][i2][2] - lhs[j][i2][0]*lhs[j][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhs[j][i2][0]*rhs[k][j][i][m];
        }
      }
    }

    //---------------------------------------------------------------------
    // The last two rows in this grid block are a bit different,
    // since they do not have two more rows available for the
    // elimination of off-diagonal entries
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      i = grid_points[0]-2;
      i1 = grid_points[0]-1;
      fac1 = 1.0/lhs[j][i][2];
      lhs[j][i][3] = fac1*lhs[j][i][3];
      lhs[j][i][4] = fac1*lhs[j][i][4];
      for (m = 0; m < 3; m++) {
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      }
      lhs[j][i1][2] = lhs[j][i1][2] - lhs[j][i1][1]*lhs[j][i][3];
      lhs[j][i1][3] = lhs[j][i1][3] - lhs[j][i1][1]*lhs[j][i][4];
      for (m = 0; m < 3; m++) {
        rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhs[j][i1][1]*rhs[k][j][i][m];
      }
      //---------------------------------------------------------------------
      // scale the last row immediately
      //---------------------------------------------------------------------
      fac2 = 1.0/lhs[j][i1][2];
      for (m = 0; m < 3; m++) {
        rhs[k][j][i1][m] = fac2*rhs[k][j][i1][m];
      }
    }

    //---------------------------------------------------------------------
    // do the u+c and the u-c factors (components m=3 and m=4 of rhs)
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      for (i = 0; i <= grid_points[0]-3; i++) {
        i1 = i + 1;
        i2 = i + 2;
        m = 3;
        fac1 = 1.0/lhsp[j][i][2];
        lhsp[j][i][3] = fac1*lhsp[j][i][3];
        lhsp[j][i][4] = fac1*lhsp[j][i][4];
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1]*lhsp[j][i][3];
        lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1]*lhsp[j][i][4];
        rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1]*rhs[k][j][i][m];
        lhsp[j][i2][1] = lhsp[j][i2][1] - lhsp[j][i2][0]*lhsp[j][i][3];
        lhsp[j][i2][2] = lhsp[j][i2][2] - lhsp[j][i2][0]*lhsp[j][i][4];
        rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsp[j][i2][0]*rhs[k][j][i][m];
        m = 4;
        fac1 = 1.0/lhsm[j][i][2];
        lhsm[j][i][3] = fac1*lhsm[j][i][3];
        lhsm[j][i][4] = fac1*lhsm[j][i][4];
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1]*lhsm[j][i][3];
        lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1]*lhsm[j][i][4];
        rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1]*rhs[k][j][i][m];
        lhsm[j][i2][1] = lhsm[j][i2][1] - lhsm[j][i2][0]*lhsm[j][i][3];
        lhsm[j][i2][2] = lhsm[j][i2][2] - lhsm[j][i2][0]*lhsm[j][i][4];
        rhs[k][j][i2][m] = rhs[k][j][i2][m] - lhsm[j][i2][0]*rhs[k][j][i][m];
      }
    }

    //---------------------------------------------------------------------
    // And again the last two rows separately
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      i = grid_points[0]-2;
      i1 = grid_points[0]-1;
      m = 3;
      fac1 = 1.0/lhsp[j][i][2];
      lhsp[j][i][3] = fac1*lhsp[j][i][3];
      lhsp[j][i][4] = fac1*lhsp[j][i][4];
      rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      lhsp[j][i1][2] = lhsp[j][i1][2] - lhsp[j][i1][1]*lhsp[j][i][3];
      lhsp[j][i1][3] = lhsp[j][i1][3] - lhsp[j][i1][1]*lhsp[j][i][4];
      rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsp[j][i1][1]*rhs[k][j][i][m];
      m = 4;
      fac1 = 1.0/lhsm[j][i][2];
      lhsm[j][i][3] = fac1*lhsm[j][i][3];
      lhsm[j][i][4] = fac1*lhsm[j][i][4];
      rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      lhsm[j][i1][2] = lhsm[j][i1][2] - lhsm[j][i1][1]*lhsm[j][i][3];
      lhsm[j][i1][3] = lhsm[j][i1][3] - lhsm[j][i1][1]*lhsm[j][i][4];
      rhs[k][j][i1][m] = rhs[k][j][i1][m] - lhsm[j][i1][1]*rhs[k][j][i][m];
      //---------------------------------------------------------------------
      // Scale the last row immediately
      //---------------------------------------------------------------------
      rhs[k][j][i1][3] = rhs[k][j][i1][3]/lhsp[j][i1][2];
      rhs[k][j][i1][4] = rhs[k][j][i1][4]/lhsm[j][i1][2];
    }

    //---------------------------------------------------------------------
    // BACKSUBSTITUTION
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      i = grid_points[0]-2;
      i1 = grid_points[0]-1;
      for (m = 0; m < 3; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3]*rhs[k][j][i1][m];
      }
      rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3]*rhs[k][j][i1][3];
      rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3]*rhs[k][j][i1][4];
    }

    //---------------------------------------------------------------------
    // The first three factors
    //---------------------------------------------------------------------
    for (j = 1; j <= ny2; j++) {
      for (i = grid_points[0]-3; i >= 0; i--) {
        i1 = i + 1;
        i2 = i + 2;
        for (m = 0; m < 3; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] -
            lhs[j][i][3]*rhs[k][j][i1][m] -
            lhs[j][i][4]*rhs[k][j][i2][m];
        }
        //-------------------------------------------------------------------
        // And the remaining two
        //-------------------------------------------------------------------
        rhs[k][j][i][3] = rhs[k][j][i][3] -
          lhsp[j][i][3]*rhs[k][j][i1][3] -
          lhsp[j][i][4]*rhs[k][j][i2][3];
        rhs[k][j][i][4] = rhs[k][j][i][4] -
          lhsm[j][i][3]*rhs[k][j][i1][4] -
          lhsm[j][i][4]*rhs[k][j][i2][4];
      }
    }
  }

  //---------------------------------------------------------------------
  // Do the block-diagonal inversion
  //---------------------------------------------------------------------
  ninvr();
}
|
GB_unaryop__abs_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int32_int64
// op(A') function:  GB_tran__abs_int32_int64

// C type:   int32_t
// A type:   int64_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
int64_t

#define GB_CTYPE \
int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]

// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator: integer absolute value (GB_IABS is provided by GB.h)
#define GB_OP(z, x) \
z = GB_IABS (x) ;

// casting: typecast the int64_t input down to the int32_t used by the op
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;

// cij = op (cast (aij)): read A, typecast, apply the op, write C
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply GB_IABS to every entry of Ax (typecast int64_t -> int32_t), writing
// the results into Cx.  Each Cx [k] depends only on Ax [k], so the iterations
// are independent and safe to execute in parallel.
GrB_Info GB_unop__abs_int32_int64
(
    int32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this kernel was compiled out; the caller falls back to the generic path
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = GB_IABS ((int32_t) Ax [k])
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The kernel body is generated by textually including the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined above.
GrB_Info GB_tran__abs_int32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // kernel compiled out; caller uses the generic case instead
    return (GrB_NO_VALUE) ;
    #else
    // GB_PHASE_2_OF_2 selects the second pass of the two-pass transpose
    // template (the pass that places values, using the Rowcounts computed
    // by phase 1 -- see GB_unaryop_transpose.c).
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // GBCOMPACT
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication: abstract interface for reducing
 *        gradients from, and broadcasting parameters to, a set of devices
 */
class Comm {
 public:
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
   *        where the row_ids are expected to be unique and sorted
   * \param use_copy if set to true, directly copy src to dst[i] without looking up the
   *        provided row_ids
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const bool use_copy,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }

 protected:
  // CPU-pinned context shared by derived comms (set once in the constructor)
  Context pinned_ctx_;
};
/**
 * \brief an implementation of Comm that first copies data to CPU memory,
 *        and then reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    // tunables read once from the environment
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }

  // allocate the merge buffer for this key on CPU-pinned memory; dense vs
  // sparse construction is chosen by stype
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      merge_buf_[key].merged = NDArray(shape, pinned_ctx_, false, type);
    } else {
      merge_buf_[key].merged = NDArray(stype, shape, pinned_ctx_, true, type);
    }
  }

  // copy all inputs to CPU-side buffers, then sum them asynchronously via the
  // engine; the returned reference is valid until the next Reduce on this key
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    auto& buf = merge_buf_[key];
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (src[0].storage_type() == kDefaultStorage) {
        return src[0];
      } else {  // if sparse and only one GPU, always update weight on CPU
        CopyFromTo(src[0], &buf.merged, priority);
        return buf.merged;
      }
    }

    if (buf.merged.storage_type() == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf.merged, priority);
      reduce[0] = buf.merged;

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size()-1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate NDArray based on storage type
          buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
        reduce[i] = buf.copy_buf[i-1];
        const_vars[i-1] = reduce[i].var();
      }

      Engine::Get()->PushSync([reduce, this](RunContext rctx) {
          ReduceSumCPU(reduce);
        }, Context::CPU(), const_vars, {reduce[0].var()},
        FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));

    } else {
      // buf.merged is a sparse ndarray.
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());

      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
        }
      }
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      auto result = buf.merged;
      Engine::Get()->PushSync([reduce, result, this](RunContext rctx) {
          NDArray out = result;
          Resource rsc = ResourceManager::Get()->Request(rctx.ctx,
              ResourceRequest(ResourceRequest::kTempSpace));
          // serial path kept for benchmarking only (see is_serial_push_)
          is_serial_push_?
            ReduceSumCPUExSerial(reduce, &out)
            : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
        }, Context::CPU(), const_vars, {result.var()},
        FnProperty::kCPUPrioritized, priority, PROFILER_MESSAGE("KVStoreReduce"));
    }

    return buf.merged;
  }

  // fan src out to every dst; stage through the CPU merge buffer when src
  // lives on a non-CPU device
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // first copy data to cpu, then broadcast
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) CopyFromTo(buf.merged, d, priority);
    }
  }

  // see Comm::BroadcastRowSparse; src must be a row-sparse NDArray on CPU
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "BroadcastRowSparse with src on gpu context not supported";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      if (use_copy) {
        CopyFromTo(src, out, priority);
      } else {
        CHECK_EQ(out->storage_type(), kRowSparseStorage)
          << "BroadcastRowSparse expects row_sparse dst NDArray";
        CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
          << "BroadcastRowSparse with row_indices on gpu context not supported";
        // retain according to unique indices
        const bool use_sparse_retain = (src.shape()[0] != src.storage_shape()[0])
          || (row_id.dtype() != out->aux_type(rowsparse::kIdx))
          || (out->ctx().dev_mask() != Context::kGPU);
        if (use_sparse_retain) {  // use sparse_retain op
          const bool is_to_gpu = out->ctx().dev_mask() == Context::kGPU;
          // when the destination is on GPU, retain into a CPU temp first,
          // then copy across
          NDArray out_cpu = is_to_gpu? NDArray(kRowSparseStorage, src.shape(),
              src.ctx(), true, src.dtype(), src.aux_types()) : *out;
          Engine::Get()->PushSync([=](RunContext rctx) {
              const TBlob& indices = row_id.data();
              NDArray temp = out_cpu;  // get rid of const qualifier
              op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                    src, indices, kWriteTo,
                                                    &temp);
            }, Context::CPU(), {src.var(), row_id.var()}, {out_cpu.var()},
            FnProperty::kNormal, priority, PROFILER_MESSAGE("KVStoreSparseRetain"));
          if (is_to_gpu) {
            CopyFromTo(out_cpu, out, priority);
          }
        } else {  // direct copy rows
          Engine::Get()->PushSync([=](RunContext rctx) {
              CopyRetainedRowsToGPU(rctx.get_stream<cpu>(), rctx.get_stream<gpu>(),
                                    src, row_id, out);
            }, out->ctx(), {src.var(), row_id.var()}, {out->var()},
            FnProperty::kCopyToGPU, priority, PROFILER_MESSAGE("KVStoreCopyRetainedRowsToGPU"));
        }
      }
    }
  }

 private:
  /*!
   * \brief When src is a rsp with full rows,
   * simply copy retained rows directly from cpu to gpu
   * without invoking sparse_retain op.
   */
  void CopyRetainedRowsToGPU(mshadow::Stream<cpu>* cpu_stream,
                             mshadow::Stream<gpu>* gpu_stream,
                             const NDArray& src,
                             const NDArray& indices,
                             NDArray* dst) {
#if MXNET_USE_CUDA == 1
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "CopyRetainedRowsToGPU expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "CopyRetainedRowsToGPU with src on gpu context not supported";
    CHECK_EQ(src.storage_shape()[0], src.shape()[0])
      << "CopyRetainedRowsToGPU only supports src rsp with full rows";
    CHECK_EQ(indices.storage_type(), kDefaultStorage);
    CHECK_EQ(indices.ctx().dev_mask(), Context::kCPU);
    CHECK_EQ(dst->storage_type(), kRowSparseStorage);
    CHECK_EQ(dst->ctx().dev_mask(), Context::kGPU);
    CHECK_EQ(indices.dtype(), dst->aux_type(rowsparse::kIdx))
      << "CopyRetainedRowsToGPU only supports same data type for idx array and dst aux_data(0)";
    if (!src.storage_initialized() || indices.data().Size() == 0U) {
      op::FillZerosRspImpl(gpu_stream, dst);
      return;
    }
    using namespace mshadow;

    const TBlob& src_data = src.data();
    const TBlob& idx_data = indices.data();
    const size_t row_length = src.shape().ProdShape(1, src.shape().ndim());
    const size_t num_rows_retained = idx_data.Size();
    dst->CheckAndAlloc({Shape1(num_rows_retained)});
    TBlob dst_data = dst->data();
    TBlob dst_idx_data = dst->aux_data(rowsparse::kIdx);
    MSHADOW_TYPE_SWITCH(src.dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(indices.dtype(), IType, {
        // copy idx array
        Tensor<gpu, 1, IType> dst_idx_tensor = dst_idx_data.FlatTo1D<gpu, IType>(gpu_stream);
        const Tensor<cpu, 1, IType> idx_tensor = idx_data.FlatTo1D<cpu, IType>(cpu_stream);
        Copy(dst_idx_tensor, idx_tensor, gpu_stream);
        // copy src data, one retained row at a time
        const Tensor<cpu, 2, DType> src_data_tensor = src_data.get_with_shape<cpu, 2, DType>(
            Shape2(src_data.shape_[0], row_length), cpu_stream);
        Tensor<gpu, 2, DType> dst_data_tensor = dst_data.get_with_shape<gpu, 2, DType>(
            Shape2(dst_data.shape_[0], row_length), gpu_stream);
        for (size_t i = 0; i < num_rows_retained; ++i) {
          Copy(dst_data_tensor[i], src_data_tensor[idx_tensor[i]], gpu_stream);
        }
      })
    })
#else
    LOG(FATAL) << "GPU not enabled";
#endif
  }

  // reduce sum into val[0]
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // serial implementation of reduce sum for row sparse NDArray.
  inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
    using namespace rowsparse;
    using namespace mshadow;
    auto stype = out->storage_type();
    CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
    size_t total_num_rows = 0;
    size_t num_in = in.size();
    // skip the ones with empty indices and values
    std::vector<bool> skip(num_in, false);
    // the values tensor of the inputs
    MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
      MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
        std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
        std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
        // offset to the values tensor of all inputs
        std::vector<size_t> offsets(num_in, 0);
        std::vector<size_t> num_rows(num_in, 0);
        for (size_t i = 0; i < num_in; i++) {
          if (!in[i].storage_initialized()) {
            skip[i] = true;
            continue;
          }
          auto size = in[i].aux_shape(kIdx).Size();
          num_rows[i] = size;
          total_num_rows += size;
          in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
          in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
        }
        std::vector<IType> indices;
        indices.reserve(total_num_rows);
        // gather indices from all inputs
        for (size_t i = 0; i < num_in; i++) {
          for (size_t j = 0; j < num_rows[i]; j++) {
            indices.emplace_back(in_indices[i][j]);
          }
        }
        CHECK_EQ(indices.size(), total_num_rows);
        // dedup indices
        std::sort(indices.begin(), indices.end());
        indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
        // the ones left are unique non-zero rows
        size_t nnr = indices.size();
        // allocate memory for output
        out->CheckAndAlloc({Shape1(nnr)});
        auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
        auto val_data = out->data().FlatTo2D<cpu, DType>();
        // merge pass: each input keeps a cursor (offsets[j]) into its sorted
        // row indices; rows matching the current output index are accumulated
        for (size_t i = 0; i < nnr; i++) {
          // copy indices back
          idx_data[i] = indices[i];
          bool zeros = true;
          for (size_t j = 0; j < num_in; j++) {
            if (skip[j]) continue;
            size_t offset = offsets[j];
            if (offset < num_rows[j]) {
              if (indices[i] == in_indices[j][offset]) {
                if (zeros) {
                  Copy(val_data[i], in_vals[j][offset], nullptr);
                  zeros = false;
                } else {
                  val_data[i] += in_vals[j][offset];
                }
                offsets[j] += 1;
              }
            }
          }
        }
      });
    });
  }

  // sum dptr[1..] into dptr[0] over [offset, offset+size), unrolled 4-wide
  template<typename DType>
  inline static void ReduceSumCPU(
      const std::vector<DType*> &dptr, size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
    for (size_t i = 1; i < dptr.size(); i+=4) {
      switch (dptr.size() - i) {
        case 1: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          in_0 += in_1;
          break;
        }
        case 2: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          in_0 += in_1 + in_2;
          break;
        }
        case 3: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3;
          break;
        }
        default: {
          Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
          Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
          in_0 += in_1 + in_2 + in_3 + in_4;
          break;
        }
      }
    }
  }

  // chunked reduction: serial for small arrays, OpenMP over fixed-size chunks
  // for arrays of at least bigarray_bound_ elements
  template<typename DType>
  inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief temporary space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
  };
  // per-key staging buffers
  std::unordered_map<int, BufferEntry> merge_buf_;
  // element-count threshold above which reductions are parallelized
  size_t bigarray_bound_;
  // number of OpenMP threads used by ReduceSumCPUImpl
  int nthread_reduction_;
  // benchmark-only switch: force the serial sparse reduction path
  bool is_serial_push_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidths is larger than
* device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  /// \brief record the key's shape/dtype; buffers are allocated lazily on
  ///        the first Reduce() call (only dense storage is supported)
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    if (stype == kDefaultStorage) {
      sorted_key_attrs_.push_back(std::make_tuple(key, shape, dtype));
    } else {
      LOG(FATAL) << "storage type " << stype << " not implemented for device yet";
    }
  }

  /// \brief sum src arrays into this key's on-device merge buffer and
  ///        return it; src[0]'s value is copied first, the rest are staged
  ///        into per-key copy buffers and summed with ElementwiseSum
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    // lazy one-time setup: place merge buffers across the participating
    // devices and (optionally) enable GPU peer-to-peer access
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }

    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    CopyFromTo(src[0], &(buf.merged), priority);
    reduce[0] = buf.merged;

    if (buf.copy_buf.empty()) {
      // TODO(mli) this results in large device memory usage for huge ndarray,
      // such as the largest fullc in VGG. consider to do segment reduce with
      // NDArray.Slice or gpu direct memory access. for the latter, we need to
      // remove some ctx check, and also it reduces 20% perf
      buf.copy_buf.resize(src.size()-1);
      for (size_t i = 0; i < src.size()-1; ++i) {
        buf.copy_buf[i] = NDArray(
          buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype());
      }
    }
    // stage the remaining inputs next to the merge buffer, then sum
    for (size_t i = 0; i < src.size()-1; ++i) {
      CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
      reduce[i+1] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  /// \brief copy src to every dst; before initialization, fan out from one
  ///        (key-dependent) destination device, afterwards via the merge buffer
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf = merge_buf_[key];
      CopyFromTo(src, &buf.merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf.merged, d, priority);
      }
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const bool use_copy,
                          const int priority) override {
    LOG(FATAL) << "Not implemented yet";
  }

 private:
  /// \brief try to enable CUDA peer access for every ordered GPU pair;
  ///        logs an access matrix ('v' = linked) when some pairs fail
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          // already-enabled pairs still count as success
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  using KeyAttrs = std::tuple<int, TShape, int>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // largest arrays first so the greedy placement below balances elements
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      int key = std::get<0>(sorted_key_attrs_[i]);
      TShape s = std::get<1>(sorted_key_attrs_[i]);
      int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      // pick the device currently holding the fewest buffered elements
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      buf.merged = NDArray(s, ctx, false, type);
      ctx_info[ctx.dev_id].second += s.Size();
    }
    inited_ = true;
  }

  std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the merged value
    NDArray merged;
    /// \brief the gpu buffer
    std::vector<NDArray> copy_buf;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;
  /// \brief set by InitMergeBuffer(); setup is performed lazily on first use
  bool inited_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
fc_compute.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/jit_kernel.h"
DECLARE_int32(paddle_num_threads);
namespace paddle {
namespace operators {
namespace math {
/**
 * Fully-connected forward pass: Y = X * W, then (optionally) add the bias
 * row B broadcast over all M rows, with an optional fused ReLU.
 *
 * @param blas  BLAS wrapper used for the M x K by K x N matmul
 * @param M     number of rows of X / Y (batch size)
 * @param N     number of columns of W / Y (output width)
 * @param K     inner dimension (input width)
 * @param B     optional bias of length N; when NULL only the matmul is done
 * @param relu  when true, bias-add and ReLU are fused via VAddReluKernel
 */
template <typename DeviceContext, typename T>
inline void FCCompute(const BlasT<DeviceContext, T>& blas, const int M,
                      const int N, const int K, const T* X, const T* W, T* Y,
                      const T* B = NULL, bool relu = false) {
  blas.MatMul(M, N, K, X, W, Y);
  if (B == NULL) {
    return;
  }
  if (relu) {
    // fused bias + ReLU, one jit kernel invocation per output row
    const auto& vaddrelu = jitkernel::KernelPool::Instance()
                               .template Get<jitkernel::VAddReluKernel<T>>(N);
    for (int i = 0; i < M; i++) {
      T* dst = Y + i * N;
      vaddrelu->Compute(B, dst, dst);
    }
  } else {
    // plain bias add; rows are independent, so parallelize when MKL-ML
    // builds enable OpenMP
    const auto& vadd = jitkernel::KernelPool::Instance()
                           .template Get<jitkernel::VAddKernel<T>>(N);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for if (FLAGS_paddle_num_threads > 1)
#endif
    for (int i = 0; i < M; i++) {
      T* dst = Y + i * N;
      vadd->Compute(B, dst, dst);
    }
  }
}
} // namespace math
} // namespace operators
} // namespace paddle
|
1d.par.256.c |
/*@ begin PerfTuning (
def build
{
arg command = 'icc';
arg options = '-fast -openmp -I/usr/local/icc/include -lm';
}
def performance_counter
{
arg method = 'basic timer';
arg repetitions = 1;
}
def performance_params
{
param T1[] = [1];
param U1[] = [8];
param U2[] = [1];
param VEC[] = [False];
}
def search
{
# arg algorithm = 'Simplex';
arg algorithm = 'Exhaustive';
# arg time_limit = 5;
# arg total_runs = 1;
}
def input_params
{
param TVAL = 10000;
param NVAL = 4000000;
decl int T = TVAL;
decl int N = NVAL;
decl double coeff1 = 0.5;
decl double coeff2 = 0.7;
decl double h[N] = random;
decl double e[N+1] = random;
}
) @*/
/* Scratch iterators for the PLUTO-generated, 256-wide time-tiled schedule
 * below (auto-generated polyhedral code; do not hand-optimize). */
int c1, c2, c3, c4, c5;
int t, i, j, k, l, ii;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
/* S1 updates the e field from h; S2 updates h from e (1-D FDTD-style
 * stencil). The first two macro arguments carry tile coordinates and are
 * unused in the statement bodies. */
#define S1(zT0,zT1,t,i) {e[i]=e[i]-coeff1*(h[i]-h[i-1]);}
#define S2(zT0,zT1,t,i) {h[i]=h[i]-coeff2*(e[1+i]-e[i]);}
for (c1=-1;c1<=floord(N+2*T,256);c1++) {
  lb1=max(max(0,ceild(128*c1-127,256)),ceild(256*c1-T,256));
  ub1=min(min(floord(128*c1+127,128),floord(256*c1+N+255,512)),floord(N+T,256));
  /* tiles on the same c1 wavefront are independent, hence the parallel loop */
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5)
  for (c2=lb1; c2<=ub1; c2++) {
    /* boundary tile: only the last S2 of the tile remains */
    if ((c1 <= floord(512*c2-N,256)) && (c2 >= ceild(N+1,256))) {
      S2(c1-c2,-c1+2*c2,256*c2-N,N-1) ;
    }
    /* partial tiles clipped against the domain edges */
    for (c3=max(max(256*c2-N+1,256*c1-256*c2),1);c3<=min(min(256*c1-256*c2+255,256*c2-N+255),T);c3++) {
      for (c4=max(256*c2,c3+1);c4<=c3+N-1;c4++) {
        S1(c1-c2,-c1+2*c2,c3,-c3+c4) ;
        S2(c1-c2,-c1+2*c2,c3,-c3+c4-1) ;
      }
      S2(c1-c2,-c1+2*c2,c3,N-1) ;
    }
    /* full interior tile; the Orio annotation below drives the tuned
     * (tiled/unrolled/vectorized) variant of the same loop nest */
/*@ begin Loop(
transform Composite(
tile = [('c3',T1,'ii')],
unrolljam = [('c3',U1),('c4',U2)],
vector = (VEC, ['ivdep','vector always'])
)
for (c3=max(max(1,256*c1-256*c2),256*c2-N+256);c3<=min(min(256*c1-256*c2+255,T),256*c2+254);c3++)
for (c4=max(256*c2,c3+1);c4<=256*c2+255;c4++)
{
S1(c1-c2,-c1+2*c2,c3,-c3+c4) ;
S2(c1-c2,-c1+2*c2,c3,-c3+c4-1) ;
}
) @*/
    for (c3=max(max(1,256*c1-256*c2),256*c2-N+256);c3<=min(min(256*c1-256*c2+255,T),256*c2+254);c3++)
      for (c4=max(256*c2,c3+1);c4<=256*c2+255;c4++) {
        S1(c1-c2,-c1+2*c2,c3,-c3+c4) ;
        S2(c1-c2,-c1+2*c2,c3,-c3+c4-1) ;
      }
/*@ end @*/
  }
}
/*@ end @*/
|
miss_count.c | /*
* Miss Count micro benchmark
*
*
*
*
*
* A and B are both square matrix. They are statically allocated and
* initialized with constant number, so we can focus on the parallelism.
*
* usage: mpirun -np <N> mm_mpi [-t]
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#ifdef USE_CALI
#include <caliper/cali.h>
#endif
#ifndef ORDER
#define ORDER 1000 // the order of the matrix
#endif
#define AVAL 3.0 // initial value of A
#define BVAL 5.0 // initial value of B
#define TOL 0.001 // tolerance used to check the result
#define TYPE double
#define TRUE 1
#define FALSE 0
#define CACHE_L1 1
#define CACHE_L2 2
#define CACHE_L3 3
/* Parsed command-line options for the benchmark. */
struct Inputs {
  char cache_level;  /* which cache to exercise: CACHE_L1 / CACHE_L2 / CACHE_L3 */
  int threads;       /* OpenMP thread count (-t/--threads), default 4 */
};
void test_L1();
void get_input(int argc, char **argv, struct Inputs* input);
void matrix_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len);
void transpose_mat(TYPE* A, int order);
void matrix_free(TYPE* A, TYPE* B, TYPE* C, size_t size);
void print_mat(TYPE* C);
// main function
/*
 * Entry point: parse options, set the OpenMP thread count, and time one
 * parallel run of the L1-cache micro benchmark.
 * (Unused locals i, j, k, r and run_time removed.)
 */
int main(int argc, char **argv) {
    double start, end;
    struct Inputs input;

    get_input(argc, argv, &input);
    omp_set_num_threads(input.threads);

    start = omp_get_wtime();
#ifdef USE_CALI
    /* Tag each OpenMP thread with its id so Caliper can attribute samples. */
    cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#pragma omp parallel
    {
        cali_set_int(thread_attr, omp_get_thread_num());
    }
#endif
    /* every thread runs the benchmark body independently */
#pragma omp parallel
    {
        test_L1();
    }
    end = omp_get_wtime();

    printf("Run time: %f\n", end - start);
    return 0;
}
// the square matrices are ORDERxORDER
// For Skyake the L1 cache is 32k = 4000 doubles
// - so I'm assuming it is divisible by the cache line
// - 64 bytes = 8 doubles
// 64*64 = 4096 doubles
/*
 * L1 data cache micro benchmark body (run once per OpenMP thread).
 * Repeatedly (100x) transposes all three matrices to refill the caches,
 * then walks A one cache line at a time in a scrambled line order,
 * accumulating within each 8-double line.
 * (Unused locals i, j, k, r and dead commented-out matmul code removed.)
 */
void test_L1() {
    /* 64*64 doubles = 32 KiB, i.e. one Skylake L1 data cache */
    int order = 64;
    int exp_count = 0;
    /* scrambled visit order of the 8 cache lines in each 64-double row */
    int cache_line_order[] = {6,1,5,2,0,7,3,4};
    TYPE *A, *B, *C;
    matrix_init(&A, &B, &C, order);

    for (exp_count = 0; exp_count < 100; exp_count++) {
#ifdef USE_CALI
        CALI_MARK_BEGIN("cache_prep");
#endif
        /* Load all matrices into the L2 cache;
         * fill L1 cache with the C matrix */
        transpose_mat(A, order);
        transpose_mat(B, order);
        transpose_mat(C, order);
#ifdef USE_CALI
        CALI_MARK_END("cache_prep");
        CALI_MARK_BEGIN("cache_test");
#endif
        for (int k = 0; k < 8; k++) {           /* element within the cache line */
            for (int i = 0; i < order; i+=8) {  /* row, one line's worth at a time */
                for (int j = 0; j < 8; j++) {   /* scrambled cache-line column */
                    int index_1 = i*order + cache_line_order[j]*8 + k;
                    int index_2 = i*order + cache_line_order[j]*8 + 3;
                    A[index_1] = A[index_1] + A[index_2];
                }
            }
        }
#ifdef USE_CALI
        CALI_MARK_END("cache_test");
#endif
    }
    matrix_free(A,B,C,order);
}
/*************************************************************\
Utility Functions
\*************************************************************/
/*
 * Parse command-line flags into *input.
 * -1/-2/-3 (or --cache_l1/2/3) select the cache level; -t/--threads N sets
 * the OpenMP thread count. Defaults: CACHE_L1 and 4 threads.
 *
 * Fix: the original `if (i++ < argc)` was always true inside the loop, so a
 * trailing "-t" with no value read argv[argc] (NULL) into atoi() — undefined
 * behavior. We now verify a value actually follows before consuming it.
 */
void get_input(int argc, char **argv, struct Inputs* input) {
    int i;
    input->cache_level = CACHE_L1;
    input->threads = 4;
    for (i = 1; i < argc; i++) {
        if ( !(strcmp("-1", argv[i])) || !(strcmp("--cache_l1", argv[i])) )
            input->cache_level = CACHE_L1;
        else if ( !(strcmp("-2", argv[i])) || !(strcmp("--cache_l2", argv[i])) )
            input->cache_level = CACHE_L2;
        else if ( !(strcmp("-3", argv[i])) || !(strcmp("--cache_l3", argv[i])) )
            input->cache_level = CACHE_L3;
        if ( !(strcmp("-t", argv[i])) || !(strcmp("--threads", argv[i])) ) {
            if (i + 1 < argc) {
                input->threads = atoi(argv[++i]);
            } else {
                printf("Please include a thread count with that option\n");
                exit(1);
            }
        }
    }
}
// Initialize the matrices (uniform values to make an easier check)
/*
 * Allocate three 64-byte-aligned row_len x row_len matrices and fill them
 * with constants (AVAL, BVAL, 0.0) so result checking stays trivial.
 *
 * Fix/generalization: aligned_alloc requires the *byte* size to be a
 * multiple of the alignment; the original tested the element count
 * (row_len*row_len % 64), which wrongly rejected valid sizes such as
 * row_len = 4 (128 bytes). Unused loop variable i removed.
 */
void matrix_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len) {
    size_t j;
    size_t bytes = row_len * row_len * sizeof(TYPE);

    if ((bytes % 64) != 0) {
        printf("ERROR aligning memory; make sure size is multiple of 64 bytes.\n");
        exit(1);
    }
    (*A) = (TYPE*)aligned_alloc(64, bytes);
    (*B) = (TYPE*)aligned_alloc(64, bytes);
    (*C) = (TYPE*)aligned_alloc(64, bytes);
    if( ((*A) == NULL) || ((*B) == NULL) || ((*C) == NULL) ) {
        printf("ERROR allocating memory\n");
        exit(1);
    }
    for (j=0; j<row_len*row_len; j++) {
        (*A)[j] = AVAL;
        (*B)[j] = BVAL;
        (*C)[j] = 0.0;
    }
}
/*
 * In-place transpose of the order x order matrix A (row-major).
 *
 * Fix: the original started at i = 1 with j = i, so elements in row 0 /
 * column 0 were never exchanged (and diagonal entries were pointlessly
 * self-swapped). Iterate i from 0 and j from i+1 to cover every
 * off-diagonal pair exactly once.
 */
void transpose_mat(TYPE* A, int order) {
    for (int i = 0; i < order; i++) {
        for (int j = i + 1; j < order; j++) {
            TYPE temp = A[i*order + j];
            A[i*order + j] = A[j*order + i];
            A[j*order + i] = temp;
        }
    }
}
/* Release the three matrices allocated by matrix_init.
 * The size argument is unused; it is kept so existing call sites compile. */
void matrix_free(TYPE* A, TYPE* B, TYPE* C, size_t size) {
    (void)size;
    free(A);
    free(B);
    free(C);
}
/*
 * Print the ORDER x ORDER matrix C, one blank-line-separated row per line.
 * (Unused locals e, ee and v removed.)
 */
void print_mat(TYPE* C) {
    size_t i, j;
    for (i=0; i<ORDER; i++) {
        for (j=0; j<ORDER; j++) {
            printf("%f ",C[i*ORDER+j]);
        }
        printf("\n\n");
    }
}
|
common.c | #define PY_SSIZE_T_CLEAN
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define NO_IMPORT_ARRAY
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#include <numpy/arrayobject.h>
#include <numpy/npy_3kcompat.h>
#define _MICARRAYMODULE
#include "common.h"
#include "arrayobject.h"
#include "npy_config.h"
/*
 * Zero out all bytes of the array on its own device.
 * Object dtypes are rejected because micpy cannot hold Python object
 * references in offloaded memory.
 * Returns 0 on success, -1 (with TypeError set) on failure.
 */
NPY_NO_EXPORT int
_zerofill(PyMicArrayObject *ret)
{
    if (PyDataType_REFCHK(PyMicArray_DESCR(ret))) {
        PyErr_SetString(PyExc_TypeError, "micpy do not support Object type");
        return -1;
    }
    else {
        npy_intp n = PyMicArray_NBYTES(ret);
        /* run the memset on the array's target device */
        #pragma omp target device(ret->device)
        memset(PyMicArray_DATA(ret), 0, n);
    }
    return 0;
}
/*
 * Return 1 if the array's data pointer and strides are suitably aligned
 * for its dtype (micpy adaptation of numpy's _IsAligned).
 */
NPY_NO_EXPORT int
_IsAligned(PyMicArrayObject *ap)
{
    unsigned int i;
    npy_uintp aligned;
    npy_uintp alignment = PyMicArray_DESCR(ap)->alignment;

    /* alignment 1 types should have a efficient alignment for copy loops */
    if (PyMicArray_ISFLEXIBLE(ap) || PyMicArray_ISSTRING(ap)) {
        npy_intp itemsize = PyMicArray_ITEMSIZE(ap);
        /* power of two sizes may be loaded in larger moves */
        if (((itemsize & (itemsize - 1)) == 0)) {
            alignment = itemsize > NPY_MAX_COPY_ALIGNMENT ?
                NPY_MAX_COPY_ALIGNMENT : itemsize;
        }
        else {
            /* if not power of two it will be accessed bytewise */
            alignment = 1;
        }
    }

    if (alignment == 1) {
        return 1;
    }
    /* OR the data pointer and all relevant strides together: the combined
     * value is aligned iff every individual one is */
    aligned = (npy_uintp)PyMicArray_DATA(ap);
    for (i = 0; i < PyMicArray_NDIM(ap); i++) {
#if NPY_RELAXED_STRIDES_CHECKING
        /* skip dim == 1 as it is not required to have stride 0 */
        if (PyMicArray_DIM(ap, i) > 1) {
            /* if shape[i] == 1, the stride is never used */
            aligned |= (npy_uintp)PyMicArray_STRIDES(ap)[i];
        }
        else if (PyMicArray_DIM(ap, i) == 0) {
            /* an array with zero elements is always aligned */
            return 1;
        }
#else /* not NPY_RELAXED_STRIDES_CHECKING */
        aligned |= (npy_uintp)PyMicArray_STRIDES(ap)[i];
#endif /* not NPY_RELAXED_STRIDES_CHECKING */
    }
    return mpy_is_aligned((void *)aligned, alignment);
}
/*
 * Return NPY_TRUE if ap's underlying memory may be written to.
 * Arrays owning their data are trivially writeable; otherwise the chain of
 * base objects is followed down to the ultimate owner and its flags /
 * buffer interface decide.
 */
NPY_NO_EXPORT npy_bool
_IsWriteable(PyMicArrayObject *ap)
{
    PyObject *base=PyMicArray_BASE(ap);
    void *dummy;
    Py_ssize_t n;

    /* If we own our own data, then no-problem */
    if ((base == NULL) || (PyMicArray_FLAGS(ap) & NPY_ARRAY_OWNDATA)) {
        return NPY_TRUE;
    }
    /*
     * Get to the final base object
     * If it is a writeable array, then return TRUE
     * If we can find an array object
     * or a writeable buffer object as the final base object
     * or a string object (for pickling support memory savings).
     * - this last could be removed if a proper pickleable
     * buffer was added to Python.
     *
     * MW: I think it would better to disallow switching from READONLY
     * to WRITEABLE like this...
     */
    while(PyMicArray_Check(base)) {
        if (PyMicArray_CHKFLAGS((PyMicArrayObject *)base, NPY_ARRAY_OWNDATA)) {
            return (npy_bool) (PyMicArray_ISWRITEABLE((PyMicArrayObject *)base));
        }
        base = PyMicArray_BASE((PyMicArrayObject *)base);
    }

    /*
     * here so pickle support works seamlessly
     * and unpickled array can be set and reset writeable
     * -- could be abused --
     */
    if (PyString_Check(base)) {
        return NPY_TRUE;
    }
    if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) {
        return NPY_FALSE;
    }
    return NPY_TRUE;
}
/*
* check whether arrays with datatype dtype might have object fields. This will
* only happen for structured dtypes (which may have hidden objects even if the
* HASOBJECT flag is false), object dtypes, or subarray dtypes whose base type
* is either of these.
*/
NPY_NO_EXPORT int
_may_have_objects(PyArray_Descr *dtype)
{
    /* For subarray dtypes the interesting flags live on the base type. */
    PyArray_Descr *base =
        PyDataType_HASSUBARRAY(dtype) ? dtype->subarray->base : dtype;

    return (PyDataType_HASFIELDS(base) ||
            PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT));
}
/**
* Convert an array shape to a string such as "(1, 2)".
*
* @param Dimensionality of the shape
* @param npy_intp pointer to shape array
* @param String to append after the shape `(1, 2)%s`.
*
* @return Python unicode string
*/
NPY_NO_EXPORT PyObject *
convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending)
{
    npy_intp i;
    PyObject *ret, *tmp;

    /*
     * Negative dimension indicates "newaxis", which can
     * be discarded for printing if it's a leading dimension.
     * Find the first non-"newaxis" dimension.
     */
    for (i = 0; i < n && vals[i] < 0; i++);

    if (i == n) {
        /* every dimension was a leading newaxis: print an empty tuple */
        return PyUString_FromFormat("()%s", ending);
    }
    else {
        /* open the tuple with the first real dimension */
        ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
        if (ret == NULL) {
            return NULL;
        }
    }

    /* append the remaining dimensions, printing newaxis symbolically */
    for (; i < n; ++i) {
        if (vals[i] < 0) {
            tmp = PyUString_FromString(",newaxis");
        }
        else {
            tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
        }
        if (tmp == NULL) {
            Py_DECREF(ret);
            return NULL;
        }

        /* ConcatAndDel consumes tmp and replaces ret (NULL on failure) */
        PyUString_ConcatAndDel(&ret, tmp);
        if (ret == NULL) {
            return NULL;
        }
    }

    /* 1-tuples get the trailing comma, e.g. "(5,)".
     * NOTE(review): i also counts skipped leading newaxis dims, so e.g.
     * (newaxis, 5) prints without the comma — confirm this matches the
     * upstream numpy behavior this was borrowed from. */
    if (i == 1) {
        tmp = PyUString_FromFormat(",)%s", ending);
    }
    else {
        tmp = PyUString_FromFormat(")%s", ending);
    }
    PyUString_ConcatAndDel(&ret, tmp);
    return ret;
}
/*
 * Raise ValueError for a failed dot-product shape check:
 *   "shapes (a,b) and (c,d) not aligned: b (dim i) != c (dim j)".
 * If building the detailed message fails, a generic "shapes are not
 * aligned" is set instead; every temporary is released at `end`.
 */
NPY_NO_EXPORT void
dot_alignment_error(PyMicArrayObject *a, int i, PyMicArrayObject *b, int j)
{
    PyObject *errmsg = NULL, *format = NULL, *fmt_args = NULL,
             *i_obj = NULL, *j_obj = NULL,
             *shape1 = NULL, *shape2 = NULL,
             *shape1_i = NULL, *shape2_j = NULL;

    format = PyUString_FromString("shapes %s and %s not aligned:"
                                  " %d (dim %d) != %d (dim %d)");
    shape1 = convert_shape_to_string(PyMicArray_NDIM(a), PyMicArray_DIMS(a), "");
    shape2 = convert_shape_to_string(PyMicArray_NDIM(b), PyMicArray_DIMS(b), "");
    i_obj = PyLong_FromLong(i);
    j_obj = PyLong_FromLong(j);
    shape1_i = PyLong_FromSsize_t(PyMicArray_DIM(a, i));
    shape2_j = PyLong_FromSsize_t(PyMicArray_DIM(b, j));
    /* fall back to the generic message if any piece failed to build */
    if (!format || !shape1 || !shape2 || !i_obj || !j_obj ||
        !shape1_i || !shape2_j) {
        goto end;
    }
    fmt_args = PyTuple_Pack(6, shape1, shape2,
                            shape1_i, i_obj, shape2_j, j_obj);
    if (fmt_args == NULL) {
        goto end;
    }
    errmsg = PyUString_Format(format, fmt_args);
    if (errmsg != NULL) {
        PyErr_SetObject(PyExc_ValueError, errmsg);
    }
    else {
        PyErr_SetString(PyExc_ValueError, "shapes are not aligned");
    }

end:
    Py_XDECREF(errmsg);
    Py_XDECREF(fmt_args);
    Py_XDECREF(format);
    Py_XDECREF(i_obj);
    Py_XDECREF(j_obj);
    Py_XDECREF(shape1);
    Py_XDECREF(shape2);
    Py_XDECREF(shape1_i);
    Py_XDECREF(shape2_j);
}
/* Convert NPY_CASTING to string
* borrow from numpy */
NPY_NO_EXPORT const char *
npy_casting_to_string(NPY_CASTING casting)
{
    /* Map each NPY_CASTING value to its quoted keyword-argument spelling;
     * anything unrecognized becomes "<unknown>". */
    if (casting == NPY_NO_CASTING) {
        return "'no'";
    }
    if (casting == NPY_EQUIV_CASTING) {
        return "'equiv'";
    }
    if (casting == NPY_SAFE_CASTING) {
        return "'safe'";
    }
    if (casting == NPY_SAME_KIND_CASTING) {
        return "'same_kind'";
    }
    if (casting == NPY_UNSAFE_CASTING) {
        return "'unsafe'";
    }
    return "<unknown>";
}
#define GET_DEVICE(ob, val) ((PyMicArray_Check(ob)) ? \
PyMicArray_DEVICE((PyMicArrayObject *)ob) : (val))
NPY_NO_EXPORT int
get_common_device2(PyObject *op1, PyObject *op2)
{
    /* non-micarray operands are treated as living on the host CPU */
    const int host = omp_get_initial_device();
    const int first = GET_DEVICE(op1, host);
    const int second = GET_DEVICE(op2, host);

    /* Prefer current device if devices num are different */
    return (first == second) ? first : CURRENT_DEVICE;
}
NPY_NO_EXPORT int
get_common_device(PyObject **ops, int nop)
{
    /* non-micarray operands are treated as living on the host CPU */
    const int host = omp_get_initial_device();
    const int first = GET_DEVICE(ops[0], host);
    int i;

    /* Return current device if devices num are different */
    for (i = 1; i < nop; ++i) {
        if (GET_DEVICE(ops[i], host) != first) {
            return CURRENT_DEVICE;
        }
    }
    return first;
}
hoImageRegDissimilarityLocalCCR.h | /** \file hoImageRegDissimilarityLocalCCR.h
\brief Define the class to compute image Local Cross CorRelation (LocalCCR) in gadgetron registration
The analytical derivatives are computed by using the formula proposed at:
[1] Gerardo Hermosillo, Christophe Chefd'Hotel, Olivier Faugeras. Variational Methods for Multimodal Image Matching.
International Journal of Computer Vision. December 2002, Volume 50, Issue 3, pp 329-343.
http://link.springer.com/article/10.1023%2FA%3A1020830525823
[2] Gerardo Hermosillo. Variational Methods for Multimodal Image Matching. PhD Thesis, UNIVERSIT´E DE NICE - SOPHIA ANTIPOLIS. May 2002.
http://webdocs.cs.ualberta.ca/~dana/readingMedIm/papers/hermosilloPhD.pdf
This derivative computation code is based on the listed source code at page 183 - 185 in ref [2].
\author Hui Xue
*/
#ifndef hoImageRegDissimilarityLocalCCR_H_
#define hoImageRegDissimilarityLocalCCR_H_
#pragma once
#include <limits>
#include "hoImageRegDissimilarity.h"
namespace Gadgetron {
/// Local cross-correlation (LocalCCR) image dissimilarity measure; the
/// analytic derivative follows refs [1][2] cited in the file header.
template<typename ImageType>
class hoImageRegDissimilarityLocalCCR : public hoImageRegDissimilarity<ImageType>
{
public:

    typedef hoImageRegDissimilarityLocalCCR<ImageType> Self;
    typedef hoImageRegDissimilarity<ImageType> BaseClass;

    enum { D = ImageType::NDIM };

    typedef typename BaseClass::InterpolatorType InterpolatorType;

    typedef typename BaseClass::ValueType ValueType;
    typedef ValueType T;
    typedef ValueType element_type;
    typedef ValueType value_type;

    // all internal math is carried out in double precision
    typedef double computing_value_type;

    typedef typename BaseClass::coord_type coord_type;

    hoImageRegDissimilarityLocalCCR(computing_value_type betaArg=std::numeric_limits<ValueType>::epsilon() );
    hoImageRegDissimilarityLocalCCR(ValueType sigmaArg[D], computing_value_type betaArg=std::numeric_limits<ValueType>::epsilon() );
    virtual ~hoImageRegDissimilarityLocalCCR();

    // cache the target image and allocate the working arrays below
    void initialize(ImageType& t);

    // compute the dissimilarity (-sum of local CC / N) against warped image w
    virtual ValueType evaluate(ImageType& w);
    // evaluate(w) plus the analytic derivative image, stored in deriv
    virtual bool evaluateDeriv(ImageType& w);

    virtual void print(std::ostream& os) const;

    /// these parameter names are kept same as the source code on page 183 - 185 in ref [2]
    computing_value_type sigmaArg_[D]; // kernel size of local weighting function
    // regularization constant from ref [2]; NOTE(review): not currently
    // applied inside evaluate() — confirm whether that is intentional
    computing_value_type betaArg_;

    using BaseClass::gt_timer1_;
    using BaseClass::gt_timer2_;
    using BaseClass::gt_timer3_;
    using BaseClass::performTiming_;
    using BaseClass::gt_exporter_;
    using BaseClass::debugFolder_;

protected:

    using BaseClass::target_;
    using BaseClass::warpped_;
    using BaseClass::deriv_;
    using BaseClass::bg_value_;
    using BaseClass::dissimilarity_;
    using BaseClass::target;
    using BaseClass::warped;
    using BaseClass::deriv;
    using BaseClass::image_dim_;

    /// these parameter names are kept same as the source code on page 183 - 185 in ref [2]
    hoNDArray<computing_value_type> cc; computing_value_type* p_cc;    // per-pixel local CC
    hoNDArray<computing_value_type> mu1; computing_value_type* p_mu1;  // smoothed target mean
    hoNDArray<computing_value_type> mu2; computing_value_type* p_mu2;  // smoothed warped mean
    hoNDArray<computing_value_type> v1; computing_value_type* p_v1;    // smoothed target second moment / f1
    hoNDArray<computing_value_type> v2; computing_value_type* p_v2;    // smoothed warped second moment / f2
    hoNDArray<computing_value_type> v12; computing_value_type* p_v12;  // smoothed cross moment / f3

    //hoNDArray<computing_value_type> vv1; computing_value_type* p_vv1;
    //hoNDArray<computing_value_type> vv2; computing_value_type* p_vv2;
    //hoNDArray<computing_value_type> vv12; computing_value_type* p_vv12;

    // scratch memory handed to filterGaussian (allocated in initialize)
    hoNDArray<computing_value_type> mem_;

    computing_value_type eps_;
};
template<typename ImageType>
hoImageRegDissimilarityLocalCCR<ImageType>::hoImageRegDissimilarityLocalCCR(computing_value_type betaArg)
    : BaseClass(), betaArg_(betaArg)
{
    // every dimension defaults to a 2.0-pixel Gaussian window
    for (unsigned int d = 0; d < D; ++d)
    {
        sigmaArg_[d] = (computing_value_type)(2.0);
    }
}
template<typename ImageType>
hoImageRegDissimilarityLocalCCR<ImageType>::hoImageRegDissimilarityLocalCCR(ValueType sigmaArg[D], computing_value_type betaArg)
    : BaseClass(), betaArg_(betaArg)
{
    // copy the caller-supplied per-dimension window sizes
    unsigned int d = 0;
    while (d < D)
    {
        sigmaArg_[d] = (computing_value_type)(sigmaArg[d]);
        ++d;
    }
}
template<typename ImageType>
hoImageRegDissimilarityLocalCCR<ImageType>::~hoImageRegDissimilarityLocalCCR()
{
    // nothing to free explicitly; all members manage their own storage
}
/// Cache the target image (via the base class) and size all working arrays
/// to the image dimensions.
template<typename ImageType>
void hoImageRegDissimilarityLocalCCR<ImageType>::initialize(ImageType& t)
{
    BaseClass::initialize(t);

    // allocate arrays for the computation
    cc.create(image_dim_); p_cc = cc.begin();
    mu1.create(image_dim_); p_mu1 = mu1.begin();
    mu2.create(image_dim_); p_mu2 = mu2.begin();
    v1.create(image_dim_); p_v1 = v1.begin();
    v2.create(image_dim_); p_v2 = v2.begin();
    v12.create(image_dim_); p_v12 = v12.begin();

    //vv1.create(image_dim_); p_vv1 = vv1.begin();
    //vv2.create(image_dim_); p_vv2 = vv2.begin();
    //vv12.create(image_dim_); p_vv12 = vv12.begin();

#ifdef WIN32
    // filterGaussian scratch buffer: twice the sum of all image dimensions.
    // NOTE(review): mem_ is only created under WIN32, yet evaluate() always
    // passes mem_.begin() to filterGaussian — confirm non-WIN32 builds are safe.
    size_t v=0;
    for ( size_t ii=0; ii<image_dim_.size(); ii++ ) v+=image_dim_[ii];
    mem_.create(2*v);
#endif // WIN32

    eps_ = std::numeric_limits<computing_value_type>::epsilon();
}
/// Compute the LocalCCR dissimilarity between the cached target and warped
/// image w: Gaussian-smoothed local means/variances/covariance are formed,
/// the local correlation coefficient CC is evaluated per pixel, and the
/// returned value is -sum(CC)/N. The f1/f2/f3 fields needed by
/// evaluateDeriv() are left behind in v1/v2/v12.
template<typename ImageType>
typename hoImageRegDissimilarityLocalCCR<ImageType>::ValueType hoImageRegDissimilarityLocalCCR<ImageType>::evaluate(ImageType& w)
{
    try
    {
        /// in the ref [2], the code are:
        /*
        Image<float>
        mu1(I1.domain()), mu2(I1.domain()),
        v1(I1.domain()), v2(I1.domain()),
        v12(I1.domain()), f1(I1.domain()),
        f2(I1.domain()), f3(I1.domain());
        Map(I1,x) {
        const real i1 = I1[x];
        const real i2 = I2[x];
        mu1[x] = i1; v1[x] = i1 * i1;
        mu2[x] = i2; v12[x] = i1 * i2;
        v2[x] = i2 * i2;
        }
        mu1.SelfRecSmoothZeroBC(sigma); v1.SelfRecSmoothZeroBC(sigma);
        mu2.SelfRecSmoothZeroBC(sigma); v2.SelfRecSmoothZeroBC(sigma);
        v12.SelfRecSmoothZeroBC(sigma);
        criter = 0;
        Map(v1,x) {
        const real u1 = mu1[x];
        const real u2 = mu2[x];
        const real vv1 = v1[x] + beta - u1 * u1;
        const real vv2 = v2[x] + beta - u2 * u2;
        const real vv12 = v12[x] - u1 * u2;
        const real ff1 = vv12 / (vv1 * vv2);
        const real CC = vv12 * ff1;
        const real ff2 = - CC / vv2;
        const real ff3 = - (ff2 * u2 + ff1 * u1);
        f1[x] = ff1; f2[x] = ff2; f3[x] = ff3;
        cc[x] = -CC;
        criter += -CC;
        }
        f1.SelfRecSmoothZeroBC(sigma);
        f2.SelfRecSmoothZeroBC(sigma);
        f3.SelfRecSmoothZeroBC(sigma);
        norm = 0;
        Map(f1,x) {
        const float val = 2.0 * ( f1[x] * I1[x] + f2[x] * I2[x] + f3[x] ) ;
        dist[x] = val;
        norm += val * val;
        }
        */
        /// we rewrite these code for gadgetron

        //if ( performTiming_ ) { gt_timer1_.start("1"); }
        // base class warps w and stores it in `warped`
        BaseClass::evaluate(w);
        //if ( performTiming_ ) { gt_timer1_.stop(); }

        long long N = (long long)target.get_number_of_elements();

        //if ( performTiming_ ) { gt_timer1_.start("2"); }
        //mu1.copyFrom(target);
        //mu2.copyFrom(warped);
        //Gadgetron::multiply(mu1, mu1, v1);
        //Gadgetron::multiply(mu2, mu2, v2);
        //Gadgetron::multiply(mu1, mu2, v12);

        // fill first/second/cross moments pixel by pixel (in double precision)
        long long n;

        ValueType* pT = target.begin();
        ValueType* pW = warped.begin();

        for ( n=0; n<N; ++n )
        {
            const computing_value_type v1 = (computing_value_type)pT[n];
            const computing_value_type v2 = (computing_value_type)pW[n];

            p_mu1[n] = v1;
            p_mu2[n] = v2;
            p_v1[n] = v1*v1;
            p_v2[n] = v2*v2;
            p_v12[n] = v1*v2;
        }

        // Gaussian smoothing turns the moments into local (windowed) moments
        //#ifdef WIN32
        Gadgetron::filterGaussian(mu1, sigmaArg_, mem_.begin());
        Gadgetron::filterGaussian(mu2, sigmaArg_, mem_.begin());
        Gadgetron::filterGaussian(v1, sigmaArg_, mem_.begin());
        Gadgetron::filterGaussian(v2, sigmaArg_, mem_.begin());
        Gadgetron::filterGaussian(v12, sigmaArg_, mem_.begin());
        //#else
        // Gadgetron::filterGaussian(mu1, sigmaArg_);
        // Gadgetron::filterGaussian(mu2, sigmaArg_);
        // Gadgetron::filterGaussian(v1, sigmaArg_);
        // Gadgetron::filterGaussian(v2, sigmaArg_);
        // Gadgetron::filterGaussian(v12, sigmaArg_);
        //#endif // WIN32

        //if ( 0 )
        //{
        // //#pragma omp parallel sections if ( D==2 )
        // {
        // //#pragma omp section
        // {
        // Gadgetron::multiply(mu1, mu1, vv1);
        // Gadgetron::subtract(v1, vv1, vv1);
        // Gadgetron::addEpsilon(vv1);
        // }

        // //#pragma omp section
        // {
        // Gadgetron::multiply(mu2, mu2, vv2);
        // Gadgetron::subtract(v2, vv2, vv2);
        // Gadgetron::addEpsilon(vv2);
        // }

        // //#pragma omp section
        // {
        // Gadgetron::multiply(mu1, mu2, vv12);
        // Gadgetron::subtract(v12, vv12, vv12);
        // }
        // }

        // Gadgetron::multiply(vv1, vv2, vv1);
        // Gadgetron::divide(vv12, vv1, v1); // ff1
        // Gadgetron::multiply(vv12, v1, cc); // cc
        // Gadgetron::divide(cc, vv2, v2); // ff2
        // Gadgetron::scal( (computing_value_type)(-1), v2);

        // Gadgetron::multiply(v2, mu2, v12);
        // Gadgetron::multiply(v1, mu1, vv12);
        // Gadgetron::add(v12, vv12, v12);

        // computing_value_type v=0;
        // Gadgetron::norm1(cc, v);
        // dissimilarity_ = static_cast<T>(-v/N);
        //}

        // per-pixel local CC and the f1/f2/f3 derivative factors of ref [2];
        // NOTE(review): unlike the quoted reference code, betaArg_ is NOT
        // added to vv1/vv2 here, so a zero local variance divides by zero —
        // confirm whether this is intended.
        dissimilarity_ = 0;
        computing_value_type v=0;

        //#pragma omp parallel for private(n)
        for ( n=0; n<N; ++n )
        {
            const computing_value_type u1 = p_mu1[n];
            const computing_value_type u2 = p_mu2[n];

            const computing_value_type vv1 = p_v1[n] - u1 * u1;
            const computing_value_type vv2 = p_v2[n] - u2 * u2;
            const computing_value_type vv12 = p_v12[n] - u1 * u2;

            const computing_value_type ff1 = vv12 / (vv1 * vv2);
            const computing_value_type lcc = vv12 * ff1;
            const computing_value_type ff2 = - lcc / vv2;
            const computing_value_type ff3 = ff2 * u2 + ff1 * u1;

            // v1/v2/v12 are reused to hold f1/f2/f3 for evaluateDeriv()
            p_v1[n] = ff1; p_v2[n] = ff2; p_v12[n] = ff3;
            p_cc[n] = lcc;
        }

        // accumulate and negate: smaller value = better correlation
        computing_value_type lcc = 0;
        // #pragma omp parallel for reduction(+:lcc)
        for (n=0; n<N; n++)
        {
            lcc += cc[n];
        }

        dissimilarity_ = -lcc/N;
    }
    catch(...)
    {
        GERROR_STREAM("Errors happened in hoImageRegDissimilarityLocalCCR<ImageType>::evaluate(w) ... ");
    }

    return this->dissimilarity_;
}
/// Evaluate the dissimilarity for w and fill `deriv` with the analytic
/// derivative deriv = f1*I1 + f2*I2 - f3, where f1/f2/f3 are the factors
/// computed by evaluate() (stored in v1/v2/v12) after Gaussian smoothing.
/// Returns false if an exception was caught.
template<typename ImageType>
bool hoImageRegDissimilarityLocalCCR<ImageType>::evaluateDeriv(ImageType& w)
{
    try
    {
        // refreshes warped, cc and the f1/f2/f3 fields in v1/v2/v12
        this->evaluate(w);

        size_t N = target.get_number_of_elements();

        long long n;

        //#pragma omp parallel sections if ( D==2 )
        {
            //#ifdef WIN32
            //#pragma omp section
            {
                Gadgetron::filterGaussian(v1, sigmaArg_, mem_.begin());
            }

            //#pragma omp section
            {
                Gadgetron::filterGaussian(v2, sigmaArg_, mem_.begin());
            }

            //#pragma omp section
            {
                Gadgetron::filterGaussian(v12, sigmaArg_, mem_.begin());
            }
            //#else
            // Gadgetron::filterGaussian(v1, sigmaArg_);
            // Gadgetron::filterGaussian(v2, sigmaArg_);
            // Gadgetron::filterGaussian(v12, sigmaArg_);
            //#endif // WIN32
        }

        // deriv = f1*i1 + f2*i2 + f3, we don't need to multiply this by 2.0
        //if ( typeid(ValueType) == typeid(computing_value_type) )
        //{
        //Gadgetron::multiply(v1, target, mu1);
        //Gadgetron::multiply(v2, warped, mu2);
        //Gadgetron::add(mu1, mu2, deriv);
        //Gadgetron::subtract(deriv, v12, deriv);
        //}
        //else
        //{
        T* pT = target.begin();
        T* pW = warped.begin();

        // combine smoothed factors with the raw images, casting per element
        // #pragma omp parallel for default(none) shared(N, pT, pW)
        for ( n=0; n<(long long)N; n++ )
        {
            deriv(n) = static_cast<T>( p_v1[n]* (computing_value_type)pT[n] + ( p_v2[n]*(computing_value_type)pW[n] - p_v12[n] ) );
        }
        //}
    }
    catch(...)
    {
        GERROR_STREAM("Errors happened in hoImageRegDissimilarityLocalCCR<ImageType>::evaluateDeriv(w) ... ");
        return false;
    }

    return true;
}
template<typename ImageType>
void hoImageRegDissimilarityLocalCCR<ImageType>::print(std::ostream& os) const
{
    // Emit a short human-readable description of this dissimilarity measure.
    os << "--------------Gagdgetron image dissimilarity LocalCCR measure -------------" << std::endl;
    os << "Image dimension is : " << D << std::endl;
    const std::string elemTypeName(typeid(ValueType).name());
    os << "Transformation data type is : " << elemTypeName << std::endl << std::ends;
}
}
#endif // hoImageRegDissimilarityLocalCCR_H_
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for struct timeval values.
 * *y is used as scratch (it is modified) while normalizing the borrow,
 * following the classic GNU libc manual example.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that 0 <= x->tv_usec - y->tv_usec and the
     * microsecond difference stays within one second. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is certainly positive here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: allocates the grids, runs the order-2 25-point stencil
 * for Nt time steps, TESTS times, and reports per-test wall-clock times.
 *
 * Usage: prog Nx Ny Nz [Nt] -- interior sizes; 8 ghost layers are added.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Defaults so the dimensions are never read while indeterminate when the
   * command line is short (the original left them uninitialized). */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time levels of the field A, plus the coefficient grid roc2.
   * (The original allocated roc2 twice and leaked the first block.) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize every cell of both time levels and of roc2. The original
   * started these loops at index 1 and never touched A[1], so the first
   * stencil sweep read indeterminate values at the boundary planes. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  /* Reported by PRINT_RESULTS; 1 unless OpenMP is enabled. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Order-2 wave-equation style coefficients of the 25-point stencil. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
              coef0* A[t%2][i  ][j  ][k  ] +
              coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                     A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                     A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
              coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                     A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                     A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
              coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                     A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                     A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
              coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                     A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                     A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free everything, including the top-level containers and the tile-size
   * list, which the original leaked. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
GB_unop__identity_int8_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_int64)
// op(A') function: GB (_unop_tran__identity_int8_int64)
// C type: int8_t
// A type: int64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
// scalar type of the input matrix A
#define GB_ATYPE \
int64_t
// scalar type of the output matrix C
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// access the output value at position p
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
// complete read / cast / apply / store sequence, used by the apply kernel
// below and by the GB_unop_transpose.c template
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_int64)
(
    int8_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Typecast each of the anz entries of Ax (int64_t) into Cx (int8_t).
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries whose bitmap bit is set are converted.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            Cx [k] = (int8_t) Ax [k] ;
        }
    }
    else
    {
        // full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = (int8_t) Ax [k] ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Computes C = (int8_t) A' in parallel. This is a thin wrapper: all of the
// work is done by the shared GB_unop_transpose.c template, specialized via
// the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__identity_int8_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template selects full/bitmap vs sparse code paths internally
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop_transpose.c | //------------------------------------------------------------------------------
// GB_unop_transpose: C=op(cast(A')), transpose, typecast, and apply op
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
// Shared transpose template: the including file must define GB_ATYPE,
// GB_CTYPE and GB_CAST_OP (and optionally GB_ISO_TRANSPOSE) before the
// #include. A is the input matrix; C is the pre-allocated output.
// Ax unused for some uses of this template
#include "GB_unused.h"
//--------------------------------------------------------------------------
// get A and C
//--------------------------------------------------------------------------
// iso case: only the pattern moves, so the value arrays are not needed
#ifndef GB_ISO_TRANSPOSE
const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
#endif
//--------------------------------------------------------------------------
// C = op (cast (A'))
//--------------------------------------------------------------------------
// Workspaces == NULL selects the full/bitmap code path; otherwise A is
// sparse or hypersparse and Workspaces[] holds output-position cursors.
if (Workspaces == NULL)
{
//----------------------------------------------------------------------
// A and C are both full or both bitmap
//----------------------------------------------------------------------
// A is avlen-by-avdim; C is avdim-by-avlen
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t anz = avlen * avdim ; // ignore integer overflow
const int8_t *restrict Ab = A->b ;
int8_t *restrict Cb = C->b ;
ASSERT ((Cb == NULL) == (Ab == NULL)) ;
// TODO: it would be faster to do this by tiles, not rows/columns, for
// large matrices, but in most of the cases in GraphBLAS, A and C will
// be tall-and-thin or short-and-fat.
if (Ab == NULL)
{
//------------------------------------------------------------------
// A and C are both full (no work if A and C are iso)
//------------------------------------------------------------------
#ifndef GB_ISO_TRANSPOSE
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each thread handles one contiguous range of C's entries
int64_t pC_start, pC_end ;
GB_PARTITION (pC_start, pC_end, anz, tid, nthreads) ;
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
// get i and j of the entry C(i,j)
// i = (pC % avdim) ;
// j = (pC / avdim) ;
// find the position of the entry A(j,i)
// pA = j + i * avlen
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, ((pC/avdim) + (pC%avdim) * avlen)) ;
}
}
#endif
}
else
{
//------------------------------------------------------------------
// A and C are both bitmap
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t pC_start, pC_end ;
GB_PARTITION (pC_start, pC_end, anz, tid, nthreads) ;
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
// get i and j of the entry C(i,j)
// i = (pC % avdim) ;
// j = (pC / avdim) ;
// find the position of the entry A(j,i)
// pA = j + i * avlen
int64_t pA = ((pC / avdim) + (pC % avdim) * avlen) ;
// C's bitmap is the transposed bitmap of A
int8_t cij_exists = Ab [pA] ;
Cb [pC] = cij_exists ;
#ifndef GB_ISO_TRANSPOSE
if (cij_exists)
{
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
}
#endif
}
}
}
}
else
{
//----------------------------------------------------------------------
// A is sparse or hypersparse; C is sparse
//----------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const int64_t anvec = A->nvec ;
int64_t *restrict Ci = C->i ;
if (nthreads == 1)
{
//------------------------------------------------------------------
// sequential method
//------------------------------------------------------------------
// workspace [i] is the next free slot in C(:,i); presumably
// initialized by the caller from the row counts of A -- see caller
int64_t *restrict workspace = Workspaces [0] ;
for (int64_t k = 0 ; k < anvec ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C(j,i) = A(i,j)
int64_t i = Ai [pA] ;
int64_t pC = workspace [i]++ ;
Ci [pC] = j ;
#ifndef GB_ISO_TRANSPOSE
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
#endif
}
}
}
else if (nworkspaces == 1)
{
//------------------------------------------------------------------
// atomic method
//------------------------------------------------------------------
// one workspace shared by all threads: cursor increments must be
// atomic because threads can append to the same row of C
int64_t *restrict workspace = Workspaces [0] ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C(j,i) = A(i,j)
int64_t i = Ai [pA] ;
// do this atomically: pC = workspace [i]++
int64_t pC ;
GB_ATOMIC_CAPTURE_INC64 (pC, workspace [i]) ;
Ci [pC] = j ;
#ifndef GB_ISO_TRANSPOSE
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
#endif
}
}
}
}
else
{
//------------------------------------------------------------------
// non-atomic method
//------------------------------------------------------------------
// each thread owns a private workspace, so plain increments suffice
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t *restrict workspace = Workspaces [tid] ;
for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C(j,i) = A(i,j)
int64_t i = Ai [pA] ;
int64_t pC = workspace [i]++ ;
Ci [pC] = j ;
#ifndef GB_ISO_TRANSPOSE
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
#endif
}
}
}
}
}
}
#undef GB_ISO_TRANSPOSE
|
GB_unaryop__ainv_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_uint16
// op(A') function: GB_tran__ainv_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
// scalar type of the input matrix A
#define GB_ATYPE \
uint16_t
// scalar type of the output matrix C
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// access the output value at position p
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (cast (aij))
// complete read / cast / apply / store sequence, built from the pieces
// above; used by the apply kernel and the transpose template
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_uint16
(
    int16_t *Cx, // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // Apply the additive inverse to all anz entries, in parallel:
    // Cx [k] = -((int16_t) Ax [k]).
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // expands to: aij = Ax [k] ; z = (int16_t) aij ; Cx [k] = -z ;
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Computes C = -((int16_t) A') in parallel. Thin wrapper: all of the work
// is done by the GB_unaryop_transpose.c template, specialized through the
// GB_* macros defined above in this file.
GrB_Info GB_tran__ainv_int16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 fills in the values; phase 1 (row counting) is done elsewhere
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
/* Number of timed repetitions of the stencil benchmark. */
#define TESTS 2
/* Fully parenthesized result arms so the expansion stays safe inside any
 * surrounding expression. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Lower-case `min` is used later in this file but was never defined here
 * (unlike the sibling 3d25pt.c) -- define it so the file compiles. */
#ifndef min
#define min(x,y) ((x) < (y) ? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes *result = *x - *y for two `struct timeval' values.
 * NOTE: *y is normalized in place as a side effect (the classic GNU libc
 * manual carry trick), so callers must not reuse *y afterwards.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
/* Carry the other direction when the usec gap exceeds one second. */
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1, 25-point axis-symmetric variable-coefficient
 * stencil benchmark.
 *
 * Usage: prog Nx Ny Nz [Nt] -- interior sizes; 8 ghost layers are added.
 * Defaults are used when arguments are missing (the original read
 * uninitialized dimensions in that case).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the two time levels of the field A
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* 13 coefficient grids: 1 center point + 12 axis-symmetric pairs. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  /* Initialize every cell of both time levels and all coefficient grids.
   * The original started at index 1 and never touched A[1], so the
   * stencil read indeterminate values at the boundary planes. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  /* Reported by PRINT_RESULTS; 1 unless OpenMP is enabled. */
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* The original called lower-case min(), which this file never defines;
     * use the MIN macro declared at the top of the file. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free everything, including the top-level containers and the tile-size
   * list, which the original leaked. */
  for(m=0; m<2;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(A[m][i][j]);
      }
      free(A[m][i]);
    }
    free(A[m]);
  }
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
piCalc.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define MARGIN 1e-12
void Usage(char* prog_name);
int workshare(long long points);
int sequential(long long points);
int glob_sum_seq;
int glob_sum_par;
/*
 * Runs the sequential and the OpenMP work-sharing pi estimators over a
 * series of point counts and reports whether their results agree.
 *
 * NOTE(review): glob_sum_seq / glob_sum_par are int globals that neither
 * sequential() nor workshare() assigns in this file as written, so the
 * PASS/FAIL check below compares 0 with 0 and is vacuous; it is preserved
 * for compatibility but should be wired to the real estimates -- TODO.
 */
int main(){
  /* Point counts to exercise; the largest requires 64-bit arithmetic. */
  static const long long counts[] = {
    1000000LL, 10000000LL, 100000000LL, 1000000000LL, 10000000000LL
  };
  size_t c;
  /* The original repeated this block five times verbatim; a loop keeps the
   * output identical while removing the duplication. */
  for (c = 0; c < sizeof counts / sizeof counts[0]; c++) {
    printf("\n\n%lld points\n", counts[c]);
    printf("\nSEQUENTIAL\n");
    sequential(counts[c]);
    printf("\nPARALLEL\n");
    workshare(counts[c]);
    if(abs(glob_sum_seq - glob_sum_par) <= MARGIN)
      printf("\nTest PASSED \n");
    else
      printf("\nTest FAILED \n");
  }
}
/*
 * Sequentially estimates pi with `points` terms of the Leibniz series:
 *   pi = 4 * sum_{i=0..n-1} (-1)^i / (2i+1)
 * Prints the estimate, a reference value (4*atan(1)), and the elapsed
 * wall time. Always returns 0.
 */
int sequential(long long points) {
  long long n, i;
  /* Initialized: the original printed this variable before the loop while
   * it was still uninitialized -- undefined behavior (CERT EXP33-C). */
  double factor = 0.0;
  double sum = 0.0;
  n = points;
  double timeStart = omp_get_wtime();
  printf("Before for loop, factor = %f.\n", factor);
  for (i = 0; i < n; i++) {
    /* Alternating sign of the Leibniz series term. */
    factor = (i % 2 == 0) ? 1.0 : -1.0;
    sum += factor/(2*i+1);
  }
  printf("After for loop, factor = %f.\n", factor);
  sum = 4.0*sum;
  printf("With n = %lld terms\n", n);
  printf(" Our estimate of pi = %.14f\n", sum);
  printf(" Ref estimate of pi = %.14f\n", 4.0*atan(1.0));
  double timeStop = omp_get_wtime();
  printf("Elapsed time: %f ", timeStop - timeStart);
  return 0;
}
/*
 * Estimates pi with `points` terms of the Leibniz series using an OpenMP
 * parallel-for with a sum reduction, then prints the estimate, a reference
 * value, and the elapsed wall time. Always returns 0.
 */
int workshare(long long points) {
  long long n, i;
  /* Initialized: the original printed this variable while uninitialized
   * (undefined behavior). Because `factor` is private inside the parallel
   * loop, the outer copy keeps this value afterwards as well. */
  double factor = 0.0;
  double sum = 0.0;
  n = points;
  double timeStart = omp_get_wtime();
  printf("Before for loop, factor = %f.\n", factor);
  #pragma omp parallel for \
  private(i, factor) \
  shared(n)\
  reduction(+:sum)
  for (i = 0; i < n; i++) {
    factor = (i % 2 == 0) ? 1.0 : -1.0;
    sum += factor/(2*i+1);
  }
  /* NOTE: prints the pre-loop value -- the loop's `factor` was private. */
  printf("After for loop, factor = %f.\n", factor);
  sum = 4.0*sum;
  printf("With n = %lld terms\n", n);
  printf(" Our estimate of pi = %.14f\n", sum);
  printf(" Ref estimate of pi = %.14f\n", 4.0*atan(1.0));
  double timeStop = omp_get_wtime();
  printf("Elapsed time: %f ", timeStop - timeStart);
  return 0;
}
/*
 * Prints usage information to stderr and terminates the program.
 * Exits with a failure status: the original called exit(0) even though
 * this function is only reached on an invalid invocation.
 */
void Usage(char* prog_name) {
  fprintf(stderr, "usage: %s <thread_count> <n>\n", prog_name);
  fprintf(stderr, " n is the number of terms and should be >= 1\n");
  exit(EXIT_FAILURE);
}
|
count_omp_threads.c | /******************************************************************************
* FILE: count_threads_omp.c
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Forks an OpenMP thread team and has the master thread (id 0) report the
 * team size. Declaring the locals inside the parallel region makes them
 * per-thread automatically, replacing the original private() clause.
 */
int main (int argc, char *argv[])
{
  #pragma omp parallel
  {
    /* Per-thread automatic storage: each thread sees its own tid. */
    int tid = omp_get_thread_num();
    if (tid == 0) {
      /* Only the master thread reports the team size. */
      printf("Number of threads = %d\n", omp_get_num_threads());
    }
  } /* implicit barrier: all threads join before main returns */
  return 0;
}
|
GB_subassign_00.c | //------------------------------------------------------------------------------
// GB_subassign_00: C(I,J)<!,repl> = empty ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 00: C(I,J)<!,repl> = empty ; using S
// M: NULL
// Mask_comp: true
// C_replace: true
// accum: any (present or not; result is the same)
// A: any (scalar or matrix; result is the same)
// S: constructed
#include "GB_subassign_methods.h"
// Deletes every entry of C(I,J): each live entry addressed by S becomes a
// zombie (its row index is flipped). The result does not depend on A, the
// scalar, or the accum operator; I/J/Ikind/... are carried only to match
// the common subassign method signature.
GrB_Info GB_subassign_00
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix S,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
int64_t *GB_RESTRICT Ci = C->i ;
// each value of S is the position (in C's index array) of the
// corresponding entry of C(I,J)
const int64_t *GB_RESTRICT Sx = (int64_t *) S->x ;
//--------------------------------------------------------------------------
// Method 00: C(I,J)<!,repl> = empty ; using S
//--------------------------------------------------------------------------
// Time: Optimal, O(nnz(S)), assuming S has already been constructed.
//--------------------------------------------------------------------------
// Parallel: all entries in S can be processed fully in parallel.
//--------------------------------------------------------------------------
// All entries in C(I,J) are deleted. The result does not depend on A or
// the scalar.
int64_t snz = GB_NNZ (S) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (snz, chunk, nthreads_max) ;
// the zombie count is accumulated per-thread via the OpenMP reduction
int64_t nzombies = C->nzombies ;
int64_t pS ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:nzombies)
for (pS = 0 ; pS < snz ; pS++)
{
// S (inew,jnew) is a pointer back into C (I(inew), J(jnew))
int64_t pC = Sx [pS] ;
int64_t i = Ci [pC] ;
// ----[X A 0] or [X . 0]-----------------------------------------------
// action: ( X ): still a zombie
// ----[C A 0] or [C . 0]-----------------------------------------------
// action: C_repl: ( delete ): becomes a zombie
if (!GB_IS_ZOMBIE (i))
{
nzombies++ ;
Ci [pC] = GB_FLIP (i) ;
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
C->nzombies = nzombies ;
return (GrB_SUCCESS) ;
}
|
cpd.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "cpd.h"
#include "matrix.h"
#include "mttkrp.h"
#include "timer.h"
#include "thd_info.h"
#include "util.h"
#include <math.h>
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/*
 * Public entry point for CPD-ALS factorization of a CSF tensor.
 * Allocates random factor matrices, runs cpd_als_iterate(), and transfers
 * the resulting factor values and lambda to 'factored', which then owns
 * them (release with splatt_free_kruskal). Returns SPLATT_SUCCESS.
 */
int splatt_cpd_als(
splatt_csf const * const tensors,
splatt_idx_t const nfactors,
double const * const options,
splatt_kruskal * factored)
{
matrix_t * mats[MAX_NMODES+1];
idx_t nmodes = tensors->nmodes;
// single-process default; other rank_info fields are presumably unused on
// this non-MPI path -- TODO confirm
rank_info rinfo;
rinfo.rank = 0;
/* allocate factor matrices */
idx_t maxdim = tensors->dims[argmax_elem(tensors->dims, nmodes)];
for(idx_t m=0; m < nmodes; ++m) {
mats[m] = (matrix_t *) mat_rand(tensors[0].dims[m], nfactors);
}
// scratch matrix sized for the largest mode (MTTKRP output buffer)
mats[MAX_NMODES] = mat_alloc(maxdim, nfactors);
val_t * lambda = (val_t *) splatt_malloc(nfactors * sizeof(val_t));
/* do the factorization! */
factored->fit = cpd_als_iterate(tensors, mats, lambda, nfactors, &rinfo,
options);
/* store output */
factored->rank = nfactors;
factored->nmodes = nmodes;
factored->lambda = lambda;
for(idx_t m=0; m < nmodes; ++m) {
factored->dims[m] = tensors->dims[m];
factored->factors[m] = mats[m]->vals;
}
/* clean up */
mat_free(mats[MAX_NMODES]);
for(idx_t m=0; m < nmodes; ++m) {
free(mats[m]); /* just the matrix_t ptr, data is safely in factored */
}
return SPLATT_SUCCESS;
}
/*
 * Releases the heap memory owned by a Kruskal factorization: the vector of
 * column norms and each factor matrix's value array. The struct itself is
 * not freed -- it remains owned by the caller.
 */
void splatt_free_kruskal(
splatt_kruskal * factored)
{
  idx_t const nmodes = factored->nmodes;
  free(factored->lambda);
  for(idx_t mode = 0; mode < nmodes; ++mode) {
    free(factored->factors[mode]);
  }
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Resets serial and MPI timers that were activated during some CPD
* pre-processing.
*
* @param rinfo MPI rank information.
*/
// Resets the AtA timer (and, under MPI, all MPI timers) that accumulated
// during CPD pre-processing; 'rinfo' is only needed for the MPI barrier.
static void p_reset_cpd_timers(
rank_info const * const rinfo)
{
timer_reset(&timers[TIMER_ATA]);
#ifdef SPLATT_USE_MPI
timer_reset(&timers[TIMER_MPI]);
timer_reset(&timers[TIMER_MPI_IDLE]);
timer_reset(&timers[TIMER_MPI_COMM]);
timer_reset(&timers[TIMER_MPI_ATA]);
timer_reset(&timers[TIMER_MPI_REDUCE]);
timer_reset(&timers[TIMER_MPI_NORM]);
timer_reset(&timers[TIMER_MPI_UPDATE]);
timer_reset(&timers[TIMER_MPI_FIT]);
// synchronize so all ranks start the timed region together
MPI_Barrier(rinfo->comm_3d);
#endif
}
/**
* @brief Find the Frobenius norm squared of a Kruskal tensor. This equivalent
* to via computing <X,X>, the inner product of X with itself. We find
* this via \lambda^T (AtA * BtB * ...) \lambda, where * is the Hadamard
* product.
*
* @param nmodes The number of modes in the tensor.
* @param lambda The vector of column norms.
* @param aTa An array of Gram Matrices (AtA, BtB, ...).
*
* @return The Frobenius norm of X, squared.
*/
// Computes <X,X> = lambda^T (AtA .* BtB .* ...) lambda using only the
// upper triangle of the Gram matrices (they are symmetric); see the
// doxygen comment above for the full derivation.
static val_t p_kruskal_norm(
idx_t const nmodes,
val_t const * const restrict lambda,
matrix_t ** aTa)
{
idx_t const rank = aTa[0]->J;
val_t * const restrict av = aTa[MAX_NMODES]->vals;
val_t norm_mats = 0;
/* use aTa[MAX_NMODES] as scratch space */
// only the upper triangle (j >= i) is written and read throughout
for(idx_t i=0; i < rank; ++i) {
for(idx_t j=i; j < rank; ++j) {
av[j + (i*rank)] = 1.;
}
}
/* aTa[MAX_NMODES] = hada(aTa) */
for(idx_t m=0; m < nmodes; ++m) {
val_t const * const restrict atavals = aTa[m]->vals;
for(idx_t i=0; i < rank; ++i) {
for(idx_t j=i; j < rank; ++j) {
av[j + (i*rank)] *= atavals[j + (i*rank)];
}
}
}
/* now compute lambda^T * aTa[MAX_NMODES] * lambda */
// off-diagonal terms are doubled since the full matrix is symmetric
for(idx_t i=0; i < rank; ++i) {
norm_mats += av[i+(i*rank)] * lambda[i] * lambda[i];
for(idx_t j=i+1; j < rank; ++j) {
norm_mats += av[j+(i*rank)] * lambda[i] * lambda[j] * 2;
}
}
// guard against tiny negative values caused by floating-point round-off
return fabs(norm_mats);
}
/**
* @brief Compute the inner product of a Kruskal tensor and an unfactored
* tensor. Assumes that 'm1' contains the MTTKRP result along the last
* mode of the two input tensors. This naturally follows the end of a
* CPD iteration.
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
*
* @return The inner product of the two tensors, computed via:
* 1^T hadamard(mats[nmodes-1], m1) \lambda.
*/
// Computes <X,Z>, the inner product of the tensor and the Kruskal model,
// from the last-mode factor matrix and the last-mode MTTKRP result m1;
// see the doxygen comment above for the formula.
static val_t p_tt_kruskal_inner(
idx_t const nmodes,
rank_info * const rinfo,
thd_info * const thds,
val_t const * const restrict lambda,
matrix_t ** mats,
matrix_t const * const m1)
{
idx_t const rank = mats[0]->J;
idx_t const lastm = nmodes - 1;
idx_t const dim = m1->I;
val_t const * const m0 = mats[lastm]->vals;
val_t const * const mv = m1->vals;
val_t myinner = 0;
#pragma omp parallel reduction(+:myinner)
{
int const tid = splatt_omp_get_thread_num();
// per-thread scratch of at least 'rank' values, allocated by the caller
// via thd_init -- TODO confirm sizing
val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
for(idx_t r=0; r < rank; ++r) {
accumF[r] = 0.;
}
// accumF[r] += m0[i][r] * mv[i][r], with rows i split across threads
#pragma omp for
for(idx_t i=0; i < dim; ++i) {
for(idx_t r=0; r < rank; ++r) {
accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
}
}
/* accumulate everything into 'myinner' */
for(idx_t r=0; r < rank; ++r) {
myinner += accumF[r] * lambda[r];
}
}
val_t inner = 0.;
#ifdef SPLATT_USE_MPI
timer_start(&timers[TIMER_MPI_FIT]);
MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
timer_stop(&timers[TIMER_MPI_FIT]);
#else
inner = myinner;
#endif
return inner;
}
/**
* @brief Compute the fit of a Kruskal tensor, Z, to an input tensor, X. This
* is computed via 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param ttnormsq The norm (squared) of the original input tensor, <X,X>.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
* @param aTa An array of matrices (length MAX_NMODES)containing BtB, CtC, etc.
*
* @return The inner product of the two tensors, computed via:
* \lambda^T hadamard(mats[nmodes-1], m1) \lambda.
*/
/*
 * Computes the fit of the Kruskal model Z to the input tensor X:
 *   fit = 1 - sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)
 * where a non-positive residual (a numerically perfect fit) is left
 * un-rooted, matching the original behavior.
 */
static val_t p_calc_fit(
idx_t const nmodes,
rank_info * const rinfo,
thd_info * const thds,
val_t const ttnormsq,
val_t const * const restrict lambda,
matrix_t ** mats,
matrix_t const * const m1,
matrix_t ** aTa)
{
  timer_start(&timers[TIMER_FIT]);
  /* <Z,Z>: norm (squared) of the current model. */
  val_t const znormsq = p_kruskal_norm(nmodes, lambda, aTa);
  /* <X,Z>: inner product of the tensor with the model. */
  val_t const xz = p_tt_kruskal_inner(nmodes, rinfo, thds, lambda, mats, m1);
  /* residual^2 = <X,X> + <Z,Z> - 2<X,Z>; only sqrt it when positive. */
  val_t const resid_sq = ttnormsq + znormsq - (2 * xz);
  val_t const residual = (resid_sq > 0.) ? sqrt(resid_sq) : resid_sq;
  timer_stop(&timers[TIMER_FIT]);
  return 1 - (residual / sqrt(ttnormsq));
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* Run CP-ALS (alternating least squares CP decomposition) on the CSF
 * tensor(s) in `tensors`, updating the factor matrices `mats` and column
 * norms `lambda` in place. Returns the final fit in [0,1]. Iterates up to
 * opts[SPLATT_OPTION_NITER] times or until |fit - oldfit| drops below
 * opts[SPLATT_OPTION_TOLERANCE]. */
double cpd_als_iterate(
    splatt_csf const * const tensors,
    matrix_t ** mats,
    val_t * const lambda,
    idx_t const nfactors,
    rank_info * const rinfo,
    double const * const opts)
{
  idx_t const nmodes = tensors[0].nmodes;
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];

  /* Setup thread structures. + 64 bytes is to avoid false sharing.
   * TODO make this better */
  splatt_omp_set_num_threads(nthreads);
  thd_info * thds = thd_init(nthreads, 3,
    (nmodes * nfactors * sizeof(val_t)) + 64,
    0,
    (nmodes * nfactors * sizeof(val_t)) + 64);

  /* mats[MAX_NMODES] is scratch space sized for the largest mode; MTTKRP
   * writes its result there each inner iteration. */
  matrix_t * m1 = mats[MAX_NMODES];

  /* Initialize first A^T * A mats. We redundantly do the first because it
   * makes communication easier. */
  matrix_t * aTa[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    aTa[m] = mat_alloc(nfactors, nfactors);
    memset(aTa[m]->vals, 0, nfactors * nfactors * sizeof(val_t));
    mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
  }
  /* used as buffer space */
  aTa[MAX_NMODES] = mat_alloc(nfactors, nfactors);

  /* mttkrp workspace */
  splatt_mttkrp_ws * mttkrp_ws = splatt_mttkrp_alloc_ws(tensors,nfactors,opts);

  /* Compute input tensor norm */
  double oldfit = 0;
  double fit = 0;
  val_t ttnormsq = csf_frobsq(tensors);

  /* setup timers */
  p_reset_cpd_timers(rinfo);
  sp_timer_t itertime;
  sp_timer_t modetime[MAX_NMODES];
  timer_start(&timers[TIMER_CPD]);

  idx_t const niters = (idx_t) opts[SPLATT_OPTION_NITER];
  for(idx_t it=0; it < niters; ++it) {
    timer_fstart(&itertime);
    for(idx_t m=0; m < nmodes; ++m) {
      //timer_fstart(&modetime[m]);
      /* Resize the scratch matrix to the current mode's dimension. */
      mats[MAX_NMODES]->I = tensors[0].dims[m];
      m1->I = mats[m]->I;

      /* M1 = X * (C o B) */
      timer_fstart(&modetime[m]);
      timer_start(&timers[TIMER_MTTKRP]);
      mttkrp_csf(tensors, mats, m, thds, mttkrp_ws, opts);
      timer_stop(&timers[TIMER_MTTKRP]);
      timer_stop(&modetime[m]);

#if 0
      /* M2 = (CtC .* BtB .* ...)^-1 */
      calc_gram_inv(m, nmodes, aTa);

      /* A = M1 * M2 */
      memset(mats[m]->vals, 0, mats[m]->I * nfactors * sizeof(val_t));
      mat_matmul(m1, aTa[MAX_NMODES], mats[m]);
#else
      /* Solve the normal equations instead of forming the explicit
       * Gram inverse (better numerical behavior + regularization). */
      par_memcpy(mats[m]->vals, m1->vals, m1->I * nfactors * sizeof(val_t));
      mat_solve_normals(m, nmodes, aTa, mats[m],
          opts[SPLATT_OPTION_REGULARIZE]);
#endif

      /* normalize columns and extract lambda */
      if(it == 0) {
        mat_normalize(mats[m], lambda, MAT_NORM_2, rinfo, thds, nthreads);
      } else {
        /* max-norm after the first sweep avoids lambda drift */
        mat_normalize(mats[m], lambda, MAT_NORM_MAX, rinfo, thds,nthreads);
      }

      /* update A^T*A */
      mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
      //timer_stop(&modetime[m]);
    } /* foreach mode */

    fit = p_calc_fit(nmodes, rinfo, thds, ttnormsq, lambda, mats, m1, aTa);
    timer_stop(&itertime);

    /* only rank 0 reports progress */
    if(rinfo->rank == 0 &&
        opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
      printf("  its = %3"SPLATT_PF_IDX" (%0.3fs)  fit = %0.5f  delta = %+0.4e\n",
          it+1, itertime.seconds, fit, fit - oldfit);
      if(opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_LOW) {
        for(idx_t m=0; m < nmodes; ++m) {
          printf("     mode = %1"SPLATT_PF_IDX" (%0.3fs)\n", m+1,
              modetime[m].seconds);
        }
      }
    }

    /* converged? (perfect fit, or fit change below tolerance) */
    if(fit == 1. ||
        (it > 0 && fabs(fit - oldfit) < opts[SPLATT_OPTION_TOLERANCE])) {
      break;
    }
    oldfit = fit;
  }
  timer_stop(&timers[TIMER_CPD]);

  cpd_post_process(nfactors, nmodes, mats, lambda, thds, nthreads, rinfo);

  /* CLEAN UP */
  splatt_mttkrp_free_ws(mttkrp_ws);
  for(idx_t m=0; m < nmodes; ++m) {
    mat_free(aTa[m]);
  }
  mat_free(aTa[MAX_NMODES]);
  thd_free(thds, nthreads);

  return fit;
}
/* Re-normalize every factor matrix (2-norm columns) after ALS finishes and
 * fold the extracted column norms into lambda, so all scaling information
 * lives in lambda and each factor matrix has unit-norm columns. */
void cpd_post_process(
    idx_t const nfactors,
    idx_t const nmodes,
    matrix_t ** mats,
    val_t * const lambda,
    thd_info * const thds,
    idx_t const nthreads,
    rank_info * const rinfo)
{
  /* scratch vector that receives each mode's column norms */
  val_t * colnorms = splatt_malloc(nfactors * sizeof(*colnorms));

  for(idx_t mode=0; mode < nmodes; ++mode) {
    mat_normalize(mats[mode], colnorms, MAT_NORM_2, rinfo, thds, nthreads);
    /* absorb this mode's norms into lambda */
    for(idx_t col=0; col < nfactors; ++col) {
      lambda[col] *= colnorms[col];
    }
  }

  /* NOTE(review): buffer comes from splatt_malloc but is released with
   * plain free() -- confirm splatt_malloc is a thin malloc wrapper. */
  free(colnorms);
}
|
GB_binop__iseq_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int8)
// A*D function (colscale): GB (_AxD__iseq_int8)
// D*A function (rowscale): GB (_DxB__iseq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int8)
// C=scalar+B GB (_bind1st__iseq_int8)
// C=scalar+B' GB (_bind1st_tran__iseq_int8)
// C=A+scalar GB (_bind2nd__iseq_int8)
// C=A'+scalar GB (_bind2nd_tran__iseq_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT8 || GxB_NO_ISEQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where all three matrices are dense; the actual loop lives in the
 * included template, specialized by the GB_* macros above (auto-generated). */
void GB (_Cdense_ewise3_noaccum__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B, accumulating a sparse matrix into a dense C. Returns GrB_NO_VALUE
 * when this kernel is compiled out (GB_DISABLE), letting the caller fall
 * back to the generic method. */
GrB_Info GB (_Cdense_accumB__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b, accumulating a scalar into a dense C. */
GrB_Info GB (_Cdense_accumb__iseq_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    /* unreachable: the braced scope above always returns first
     * (harmless artifact of the code generator) */
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale each column of A by the corresponding diagonal entry of D. */
GrB_Info GB (_AxD__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale each row of B by the corresponding diagonal entry of D. */
GrB_Info GB (_DxB__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C = A+B (optionally masked / complemented). For eWiseUnion the
 * alpha/beta scalars replace missing entries of A and B respectively. */
GrB_Info GB (_AaddB__iseq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    /* scalars are only read when is_eWiseUnion is true */
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult (method 08): C = A.*B with optional (complemented) mask,
 * C sparse/hypersparse; work is sliced into TaskList by the caller. */
GrB_Info GB (_AemultB_08__iseq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
 * bitmap/full. GB_BINOP_FLIP is 0 for ISEQ (commutative), so only the
 * unflipped template instantiation below is compiled in. */
GrB_Info GB (_AemultB_02__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
 * and B are bitmap/full. */
GrB_Info GB (_AemultB_04__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult where the result C is held as a bitmap matrix. */
GrB_Info GB (_AemultB_bitmap__iseq_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx [p] = (x == Bx [p]) for every entry present in B, binding the scalar x
 * as the first operand. Bb is B's bitmap (NULL when B is full); positions
 * absent from B are skipped. Cx and Bx may alias. */
GrB_Info GB (_bind1st__iseq_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only compute entries present in B
        if (GBB (Bb, p))
        {
            int8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x == bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx [p] = (Ax [p] == y) for every entry present in A, binding the scalar y
 * as the second operand. Ab is A's bitmap (NULL when A is full). Cx and Ax
 * may alias. */
GrB_Info GB (_bind2nd__iseq_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only compute entries present in A
        if (GBB (Ab, p))
        {
            int8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij == y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
/* C = op (x, A'): transpose A and apply cij = (x == aij) via the
 * GB_CAST_OP macro defined just above this function. */
GrB_Info GB (_bind1st_tran__iseq_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    /* restore GB_ATYPE for subsequent template expansions */
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
/* C = op (A', y): transpose A and apply cij = (aij == y) via the
 * GB_CAST_OP macro defined just above this function. */
GrB_Info GB (_bind2nd_tran__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
requantize_leakyrelu_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* Fused requantize + leaky-ReLU for pack8 (8 channels per element) NEON data.
 * Converts int32 input to int8 output per channel q:
 *     out = int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
 * which is folded into a single multiply-add:
 *     out = int8_leakyrelu(v * (scale_in * scale_out) + bias * scale_out, slope)
 * scale/bias tables are either a single broadcast value (size == 1) or
 * per-channel (8 floats per pack8 channel q). */
static void requantize_leakyrelu_pack8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;

    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // int8(leakyrelu(v * scale_in, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out), slope)

    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)

    if (bias_data_size == 0)
    {
        // no-bias path: out = int8_leakyrelu(v * scale, slope)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // per-channel (or broadcast) scales; the in/out scales are folded
            // into a single multiplier per 4-lane half of the pack8 element
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            float32x4_t _slope = vdupq_n_f32(slope);

            int i = 0;
#if __aarch64__
            // 4 pack8 elements (32 ints) per iteration
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                _v4 = vmulq_f32(_v4, _scale0);
                _v5 = vmulq_f32(_v5, _scale1);
                _v6 = vmulq_f32(_v6, _scale0);
                _v7 = vmulq_f32(_v7, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));
                vst1_s8(ptr + 16, float2int8leakyrelu(_v4, _v5, _slope));
                vst1_s8(ptr + 24, float2int8leakyrelu(_v6, _v7, _slope));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            // 2 pack8 elements per iteration
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));

                intptr += 16;
                ptr += 16;
            }
            // remaining single pack8 element
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                intptr += 8;
                ptr += 8;
            }
        }
    }
    else
    {
        // bias path: out = int8_leakyrelu(v * scale + bias', slope)
        // with bias' = bias * scale_out pre-folded outside the loop
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
            float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
            float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            _bias0 = vmulq_f32(_bias0, _scale_out0);
            _bias1 = vmulq_f32(_bias1, _scale_out1);
            float32x4_t _slope = vdupq_n_f32(slope);

            int i = 0;
#if __aarch64__
            // 4 pack8 elements per iteration (fused multiply-add with bias)
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                _v4 = vfmaq_f32(_bias0, _v4, _scale0);
                _v5 = vfmaq_f32(_bias1, _v5, _scale1);
                _v6 = vfmaq_f32(_bias0, _v6, _scale0);
                _v7 = vfmaq_f32(_bias1, _v7, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));
                vst1_s8(ptr + 16, float2int8leakyrelu(_v4, _v5, _slope));
                vst1_s8(ptr + 24, float2int8leakyrelu(_v6, _v7, _slope));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            for (; i + 1 < size; i += 2)
            {
#if __aarch64__
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));

                intptr += 16;
                ptr += 16;
#else  // __aarch64__
                // ARMv7 inline-asm version of the 2-element loop body:
                // convert, scale+bias (vmla), compute slope-scaled copy,
                // round both to int32 (vcvtr), narrow with saturation to
                // int8, then vmax.s8 picks max(v, v*slope) == leakyrelu.
                asm volatile(
                    "pld        [%0, #512]      \n"
                    "vldm       %0!, {d8-d15}   \n"
                    "vmov       q0, %q6         \n"
                    "vmov       q1, %q7         \n"
                    "vmov       q2, %q6         \n"
                    "vmov       q3, %q7         \n"
                    "vcvt.f32.s32 q4, q4        \n"
                    "vcvt.f32.s32 q5, q5        \n"
                    "vcvt.f32.s32 q6, q6        \n"
                    "vcvt.f32.s32 q7, q7        \n"
                    "vmla.f32   q0, q4, %q4     \n"
                    "vmla.f32   q1, q5, %q5     \n"
                    "vmla.f32   q2, q6, %q4     \n"
                    "vmla.f32   q3, q7, %q5     \n"
                    "vmul.f32   q4, q0, %q8     \n"
                    "vmul.f32   q5, q1, %q8     \n"
                    "vmul.f32   q6, q2, %q8     \n"
                    "vmul.f32   q7, q3, %q8     \n"
                    "vcvtr.s32.f32 s0, s0       \n"
                    "vcvtr.s32.f32 s1, s1       \n"
                    "vcvtr.s32.f32 s2, s2       \n"
                    "vcvtr.s32.f32 s3, s3       \n"
                    "vcvtr.s32.f32 s4, s4       \n"
                    "vcvtr.s32.f32 s5, s5       \n"
                    "vcvtr.s32.f32 s6, s6       \n"
                    "vcvtr.s32.f32 s7, s7       \n"
                    "vcvtr.s32.f32 s8, s8       \n"
                    "vcvtr.s32.f32 s9, s9       \n"
                    "vcvtr.s32.f32 s10, s10     \n"
                    "vcvtr.s32.f32 s11, s11     \n"
                    "vcvtr.s32.f32 s12, s12     \n"
                    "vcvtr.s32.f32 s13, s13     \n"
                    "vcvtr.s32.f32 s14, s14     \n"
                    "vcvtr.s32.f32 s15, s15     \n"
                    "vcvtr.s32.f32 s16, s16     \n"
                    "vcvtr.s32.f32 s17, s17     \n"
                    "vcvtr.s32.f32 s18, s18     \n"
                    "vcvtr.s32.f32 s19, s19     \n"
                    "vcvtr.s32.f32 s20, s20     \n"
                    "vcvtr.s32.f32 s21, s21     \n"
                    "vcvtr.s32.f32 s22, s22     \n"
                    "vcvtr.s32.f32 s23, s23     \n"
                    "vcvtr.s32.f32 s24, s24     \n"
                    "vcvtr.s32.f32 s25, s25     \n"
                    "vcvtr.s32.f32 s26, s26     \n"
                    "vcvtr.s32.f32 s27, s27     \n"
                    "vcvtr.s32.f32 s28, s28     \n"
                    "vcvtr.s32.f32 s29, s29     \n"
                    "vcvtr.s32.f32 s30, s30     \n"
                    "vcvtr.s32.f32 s31, s31     \n"
                    "vqmovn.s32 d0, q0          \n"
                    "vqmovn.s32 d1, q1          \n"
                    "vqmovn.s32 d4, q2          \n"
                    "vqmovn.s32 d5, q3          \n"
                    "vqmovn.s32 d8, q4          \n"
                    "vqmovn.s32 d9, q5          \n"
                    "vqmovn.s32 d12, q6         \n"
                    "vqmovn.s32 d13, q7         \n"
                    "vqmovn.s16 d0, q0          \n"
                    "vqmovn.s16 d1, q2          \n"
                    "vqmovn.s16 d8, q4          \n"
                    "vqmovn.s16 d9, q6          \n"
                    "vmax.s8    q0, q0, q4      \n"
                    "vst1.s8    {d0-d1}, [%1 :128]! \n"
                    : "=r"(intptr),
                    "=r"(ptr)
                    : "0"(intptr),
                    "1"(ptr),
                    "w"(_scale0), // %4
                    "w"(_scale1), // %5
                    "w"(_bias0),  // %6
                    "w"(_bias1),  // %7
                    "w"(_slope)   // %8
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#endif // __aarch64__
            }
            for (; i < size; i++)
            {
#if __aarch64__
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                intptr += 8;
                ptr += 8;
#else  // __aarch64__
                // single-element inline-asm version of the loop body above
                asm volatile(
                    "pld        [%0, #256]      \n"
                    "vld1.s32   {d8-d11}, [%0 :128]! \n"
                    "vmov       q0, %q6         \n"
                    "vmov       q1, %q7         \n"
                    "vcvt.f32.s32 q4, q4        \n"
                    "vcvt.f32.s32 q5, q5        \n"
                    "vmla.f32   q0, q4, %q4     \n"
                    "vmla.f32   q1, q5, %q5     \n"
                    "vmul.f32   q2, q0, %q8     \n"
                    "vmul.f32   q3, q1, %q8     \n"
                    "vcvtr.s32.f32 s0, s0       \n"
                    "vcvtr.s32.f32 s1, s1       \n"
                    "vcvtr.s32.f32 s2, s2       \n"
                    "vcvtr.s32.f32 s3, s3       \n"
                    "vcvtr.s32.f32 s4, s4       \n"
                    "vcvtr.s32.f32 s5, s5       \n"
                    "vcvtr.s32.f32 s6, s6       \n"
                    "vcvtr.s32.f32 s7, s7       \n"
                    "vcvtr.s32.f32 s8, s8       \n"
                    "vcvtr.s32.f32 s9, s9       \n"
                    "vcvtr.s32.f32 s10, s10     \n"
                    "vcvtr.s32.f32 s11, s11     \n"
                    "vcvtr.s32.f32 s12, s12     \n"
                    "vcvtr.s32.f32 s13, s13     \n"
                    "vcvtr.s32.f32 s14, s14     \n"
                    "vcvtr.s32.f32 s15, s15     \n"
                    "vqmovn.s32 d8, q0          \n"
                    "vqmovn.s32 d9, q1          \n"
                    "vqmovn.s32 d10, q2         \n"
                    "vqmovn.s32 d11, q3         \n"
                    "vqmovn.s16 d8, q4          \n"
                    "vqmovn.s16 d10, q5         \n"
                    "vmax.s8    d8, d8, d10     \n"
                    "vst1.s8    {d8}, [%1 :64]! \n"
                    : "=r"(intptr),
                    "=r"(ptr)
                    : "0"(intptr),
                    "1"(ptr),
                    "w"(_scale0), // %4
                    "w"(_scale1), // %5
                    "w"(_bias0),  // %6
                    "w"(_bias1),  // %7
                    "w"(_slope)   // %8
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5");
#endif // __aarch64__
            }
        }
    }
}
|
a5.c | #define N 100000000
#define MAX 4
int a[N],b[N],ind[N];
long long s=0;
main()
{
int i;
/* inicialitzacio, no en paral.lel */
for(i=0;i<N;i++)
{
a[i]=1;
b[i]=2;
ind[i]=i%MAX;
}
#pragma omp parallel for schedule(static,1)
//#pragma omp parallel for
for (i=0;i<N;i++)
b[ind[i]] += a[i];
for (i=0;i<MAX;i++)
{
printf("Valor %d, de b %d \n",i,b[i]);
s+=b[i];
}
printf("Suma total de b: %ld\n",s);
}
|
DRB059-lastprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Using lastprivate() to resolve an output dependence.
Semantics of lastprivate (x):
causes the corresponding original list item to be updated after the end of the region.
The compiler/runtime copies the local value back to the shared one within the last iteration.
*/
#include <stdio.h>
/* Assign the loop index to x inside a parallel loop. lastprivate(x) makes
 * the original x receive the value from the sequentially-last iteration
 * (i == 99) after the region, so this prints "x=99" with no data race. */
void foo()
{
  int i,x;
#pragma omp parallel for private(i ) lastprivate(x )
  for (i=0;i<100;i++)
    x=i;
  printf("x=%d",x);
}
/* Entry point: run the lastprivate demonstration. */
int main()
{
  foo();
  return 0;
}
|
mkl_quantized_conv_ops.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#define TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#ifdef INTEL_MKL
namespace tensorflow {
template <class T>
float MklFloatForOneQuantizedLevel(float range_min, float range_max) {
int64 highest = static_cast<int64>(Eigen::NumTraits<T>::highest());
int64 lowest = static_cast<int64>(Eigen::NumTraits<T>::lowest());
// Adjusting for having a symmetric range.
// for example: for 8-bit [-127, 127] as opposed to [-128, 127].
if (lowest < -highest) ++lowest;
const float float_for_one_quantized_level =
(range_max - range_min) / (highest - lowest);
return float_for_one_quantized_level;
}
// Computes the real-valued output range [*min_c, *max_c] for the product of
// two quantized tensors with ranges [min_a, max_a] (type T1) and
// [min_b, max_b] (type T2), quantized into type T3.
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
                                           float min_b, float max_b,
                                           float* min_c, float* max_c) {
  // Real-unit size of one quantized step for each input, and their product
  // gives the step size of the output type.
  const float a_step = MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
  const float b_step = MklFloatForOneQuantizedLevel<T2>(min_b, max_b);
  const float c_step = a_step * b_step;
  const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
  const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
  *min_c = c_step * c_lowest;
  *max_c = c_step * c_highest;
}
template <class T1, class T2, class T3>
void MklQuantizationRangeForMultiplication(float min_a, float max_a,
const Tensor& min_b_vector,
const Tensor& max_b_vector,
Tensor** min_c_vector,
Tensor** max_c_vector) {
DCHECK(min_b_vector.NumElements() == (*min_c_vector)->NumElements());
DCHECK(max_b_vector.NumElements() == (*max_c_vector)->NumElements());
size_t n_channel = min_b_vector.NumElements();
const int64 c_highest = static_cast<int64>(Eigen::NumTraits<T3>::highest());
const int64 c_lowest = static_cast<int64>(Eigen::NumTraits<T3>::lowest());
const float* min_b = min_b_vector.flat<float>().data();
const float* max_b = max_b_vector.flat<float>().data();
float* min_c = (*min_c_vector)->flat<float>().data();
float* max_c = (*max_c_vector)->flat<float>().data();
#pragma omp parallel for
for (size_t n = 0; n < n_channel; ++n) {
float a_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T1>(min_a, max_a);
float b_float_for_one_quant_level =
MklFloatForOneQuantizedLevel<T2>(min_b[n], max_b[n]);
float c_float_for_one_quant_level =
a_float_for_one_quant_level * b_float_for_one_quant_level;
min_c[n] = c_float_for_one_quant_level * c_lowest;
max_c[n] = c_float_for_one_quant_level * c_highest;
}
}
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_KERNELS_MKL_QUANTIZED_CONV_OPS_H_
|
GB_assign_zombie3.c | //------------------------------------------------------------------------------
// GB_assign_zombie3: delete entries in C(:,j) for C_replace_phase
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// For GrB_Row_assign or GrB_Col_assign, C(I,j)<#M,repl>=any must delete all
// entries C(i,j) outside of C(I,j), if the mask M(i,0) (or its complement) is
// zero. This step is not done for GxB_*_subassign, since that method does not
// modify anything outside IxJ.
// GB_assign_zombie3 and GB_assign_zombie4 are transposes of each other.
// C must be sparse or hypersparse.
// M can have any sparsity structure: hypersparse, sparse, bitmap, or full
#include "GB_assign.h"
#include "GB_assign_zombie.h"
#include "GB_subassign_methods.h"
void GB_assign_zombie3
(
    GrB_Matrix C,                   // the matrix C, or a copy
    const GrB_Matrix M,             // mask; any sparsity structure
    const bool Mask_comp,           // if true, use the complement of M
    const bool Mask_struct,         // if true, use only the pattern of M
    const int64_t j,                // vector index with entries to delete
    const GrB_Index *I,             // list of row indices kept in C(I,j)
    const int64_t nI,               // length of I
    const int Ikind,                // kind of index list I
    const int64_t Icolon [3],       // begin:inc:end when I is a range/stride
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;
    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (!GB_PENDING (C)) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_JUMBLED (M)) ;      // binary search on M
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get C (:,j)
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ch = C->h ;
    const int64_t *GB_RESTRICT Cp = C->p ;
    int64_t *GB_RESTRICT Ci = C->i ;
    // locate the range Ci [pC_start:pC_end-1] holding vector j of C
    // (handles both the sparse and hypersparse cases)
    int64_t pC_start, pC_end, pleft = 0, pright = C->nvec-1 ;
    GB_lookup (C->h != NULL, Ch, Cp, C->vlen, &pleft, pright, j,
        &pC_start, &pC_end) ;
    int64_t nzombies = C->nzombies ;
    const int64_t zjnz = pC_end - pC_start ;    // # of entries in C(:,j)

    //--------------------------------------------------------------------------
    // get M(:,0)
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Mp = M->p ;
    const int8_t *GB_RESTRICT Mb = M->b ;
    const int64_t *GB_RESTRICT Mi = M->i ;
    // Mx is NULL for a structural mask: only the pattern of M is used
    const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t Mvlen = M->vlen ;
    int64_t pM_start = 0 ; // Mp [0]
    int64_t pM_end = GBP (Mp, 1, Mvlen) ;
    const bool M_is_bitmap = GB_IS_BITMAP (M) ;
    // M(:,0) with Mvlen entries is effectively dense: entries can be found
    // by direct lookup instead of binary search
    const bool mjdense = (pM_end - pM_start) == Mvlen ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (zjnz, chunk, nthreads_max) ;
    // oversubscribe tasks (64 per thread) with dynamic scheduling to balance
    // the uneven per-entry cost of the mask lookups
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;

    //--------------------------------------------------------------------------
    // delete entries from C(:,j) that are outside I, if the mask M allows it
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // this task handles Ci [pC_start+p1 : pC_start+p2-1]
        int64_t p1, p2 ;
        GB_PARTITION (p1, p2, zjnz, taskid, ntasks) ;
        for (int64_t pC = pC_start + p1 ; pC < pC_start + p2 ; pC++)
        {

            //------------------------------------------------------------------
            // get C(i,j)
            //------------------------------------------------------------------

            int64_t i = Ci [pC] ;
            if (!GB_IS_ZOMBIE (i))
            {

                //--------------------------------------------------------------
                // C(i,j) is outside C(I,j) if i is not in the list I
                //--------------------------------------------------------------

                bool i_outside = !GB_ij_is_in_list (I, nI, i, Ikind, Icolon) ;
                if (i_outside)
                {

                    //----------------------------------------------------------
                    // C(i,j) is a live entry not in the C(I,J) submatrix
                    //----------------------------------------------------------

                    // Check the mask M to see if it should be deleted.
                    // Sets the local variable mij = M(i,0).
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (i) ;
                    if (Mask_comp)
                    {
                        // negate the mask if Mask_comp is true
                        mij = !mij ;
                    }
                    if (!mij)
                    {
                        // delete C(i,j) by marking it as a zombie: the row
                        // index is flipped and the zombie count incremented
                        nzombies++ ;
                        Ci [pC] = GB_FLIP (i) ;
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    C->nzombies = nzombies ;
}
|
matrix.c |
void normalize_evectors(double complex *evecs);
void reigenvalues(double complex *A, double complex *Q, double complex *evals, double complex *evecs, int nA, int generalized)
{
/* Computes the eigenvalues and right eigenvectors of the complex matrix A which is NxN.
    A . evecs = evals Q . evecs
   This is essentially a wrapper for the ZGEEV (or ZGGEV when generalized != 0)
   LAPACK routine.
   INPUTS:
     The matrices A, Q (ignored unless generalized), and evecs, and the vector
     evals, which are all overwritten.
   OUTPUTS:
     The eigenvalues are stored in the evals array.
     The eigenvectors are stored in the ROWS of the evecs matrix
     (LAPACK writes column-major columns; row-major callers see rows).
   NOTE(review): INFO is not checked after the LAPACK calls; failures are
   silent.  When generalized and beta[i] == 0 (infinite eigenvalue), evals[i]
   is left unmodified -- presumably callers pre-initialize evals; confirm. */
    int i,j;
    char JOBVL = 'N';                   // left eigenvectors not computed
    char JOBVR = 'V';                   // right eigenvectors computed
    int INFO;
    int LDA = nA;
    int LDB = nA;
    int LDVL = nA;
    int LDVR = nA;
    int LWORK = 2*nA;                   // minimum workspace for ZGEEV/ZGGEV
    double *RWORK = (double *)malloc(sizeof(double)*8*nA);
    double complex *CWORK = (double complex *)malloc(sizeof(double complex)*2*nA);
    double complex *tA = (double complex *)malloc(sizeof(double complex)*nA*nA);
    double complex *tQ = (double complex *)malloc(sizeof(double complex)*nA*nA);
    double complex *evals_alpha = (double complex *)malloc(sizeof(double complex)*nA);
    double complex *evals_beta = (double complex *)malloc(sizeof(double complex)*nA);
    // transpose A and Q into scratch copies: converts the caller's row-major
    // layout to the column-major layout LAPACK expects, and preserves A, Q
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) {
            tA[i+nA*j] = A[j+nA*i];
            tQ[i+nA*j] = Q[j+nA*i];
        }
    }
    if (generalized) {
        // generalized problem A.x = lambda Q.x; eigenvalue = alpha/beta
        zggev_( &JOBVL, &JOBVR, &nA, tA, &LDA, tQ, &LDB, evals_alpha,evals_beta, NULL, &LDVL, evecs, &LDVR, CWORK, &LWORK, RWORK, &INFO );
        for(i=0;i<nA;i++) {
            if (cabs(evals_beta[i]) != 0) {
                evals[i] = evals_alpha[i]/evals_beta[i];
            }
        }
    }
    else {
        // standard problem A.x = lambda x; tQ serves as the (unused) VL slot
        zgeev_( &JOBVL, &JOBVR, &nA, tA, &LDA, evals, tQ, &LDVL, evecs, &LDVR, CWORK, &LWORK, RWORK, &INFO );
    }
//    normalize_evectors(evecs);
    SAFE_FREE(tA); SAFE_FREE(tQ);
    SAFE_FREE(RWORK); SAFE_FREE(CWORK);
    SAFE_FREE(evals_alpha);
    SAFE_FREE(evals_beta);
    return;
}
/* Solve the linear system A.x = B for a single right-hand side using LAPACK
 * DGESV.  A (nA x nA) is overwritten with its LU factors; B is overwritten
 * with the solution x.
 * NOTE(review): a second, A-preserving definition of solve() appears later
 * in this file; one of the two must be removed or renamed to link. */
void solve(double *A, double *B,int nA) {
    int NRHS = 1;
    int LDA = nA;
    int LDB = nA;   /* BUG FIX: was `NA`, an undeclared identifier */
    int INFO;
    int *IPIV = (int *)malloc(sizeof(int)*nA);
    dgesv_(&nA, &NRHS, A, &LDA, IPIV, B, &LDB, &INFO);
    free(IPIV);     /* BUG FIX: pivot array was leaked */
    return;
}
void matmat(double *A, double *B, double *C,
            double alpha, double beta, int nA)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
   A, B, and C are all nA x nA matrices.
   This is essentially a wrapper for the DGEMM BLAS routine.
   NOTE(review): uses the file-scope scratch array `work` and dimension `N`
   declared elsewhere in this file; assumes N >= nA -- TODO confirm.
   The transpose round-trips below convert row-major C to the column-major
   layout DGEMM expects and back; presumably TRANSA=TRANSB='t' plays the
   same role for A and B -- verify against callers. */
    int i,j;
    char TRANSA = 't';
    char TRANSB = 't';
    int m = nA;
    int n = nA;
    int k = nA;
    int LDA = nA;
    int LDB = nA;
    int LDC = nA;
    // transpose C (via work) so the beta*C term is accumulated in
    // column-major order
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) work[i+N*j] = C[j + nA*i];
    }
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) C[i + nA*j] = work[i+N*j];
    }
    dgemm_(&TRANSA, &TRANSB, &m,&n,&k,&alpha,A,&LDA,B,&LDB,&beta,C,&LDC);
    // transpose the column-major result back to row-major
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) work[i+N*j] = C[j + nA*i];
    }
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) C[i + nA*j] = work[i+N*j];
    }
    return;
}
/* Naive triple-loop version of C = alpha*A.B + beta*C for row-major n x n
 * matrices, with no BLAS dependency.
 * BUG FIX: the original referenced undeclared `res` and `k` and the
 * lowercase names `a`, `b`, `c` instead of the parameters A, B, C, so it
 * did not compile. */
void matmat_simple(double *A, double *B, double *C, double alpha, double beta,int n) {
    int i,j,k;
    double res;
    for(i=0;i<n;i++) {
        for(j=0;j<n;j++) {
            res = 0;
            // dot product of row i of A with column j of B
            for(k=0;k<n;k++) {
                res += A[k+ i*n]*B[j + k*n];
            }
            C[j + i*n] = alpha * res + beta * C[j+i*n];
        }
    }
    return;
}
void cmatmat(double complex *A, double complex *B, double complex *C,
             double complex alpha, double complex beta, int nA)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
   A, B, and C are all nA x nA complex matrices.
   This is essentially a wrapper for the ZGEMM BLAS routine.
   NOTE(review): uses the file-scope scratch array `cwork` and dimension `N`
   declared elsewhere in this file; assumes N >= nA -- TODO confirm.
   The transpose round-trips convert row-major C to column-major and back,
   mirroring matmat() above. */
    int i,j;
    char TRANSA = 't';
    char TRANSB = 't';
    int m = nA;
    int n = nA;
    int k = nA;
    int LDA = nA;
    int LDB = nA;
    int LDC = nA;
    // transpose C (via cwork) so the beta*C term accumulates in
    // column-major order
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) cwork[i+N*j] = C[j + nA*i];
    }
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) C[i + nA*j] = cwork[i+N*j];
    }
    zgemm_(&TRANSA, &TRANSB, &m,&n,&k,&alpha,A,&LDA,B,&LDB,&beta,C,&LDC);
    // transpose the column-major result back to row-major
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) cwork[i+N*j] = C[j + nA*i];
    }
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) C[i + nA*j] = cwork[i+N*j];
    }
    return;
}
void matvec(double *A, double*B, double *C,
            double alpha, double beta, int nB)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
   A is an nB x nB matrix, B and C are vectors of length nB.
   This is essentially a wrapper for the DGEMV BLAS routine (the original
   comment said ZGEMV).  TRANS='t' makes column-major DGEMV compute A.B for
   the caller's row-major A. */
    char TRANS = 't';
    int m = nB;
    int n = nB;
    int LDA = nB;
    int INCX = 1;
    int INCY = 1;
    dgemv_(&TRANS, &m,&n,&alpha,A,&LDA,B,&INCX,&beta,C,&INCY);
    return;
}
/* Solve the linear system A.x = B for a single right-hand side using LAPACK
 * DGESV.  A is copied (transposed into column-major order) so the caller's
 * A is preserved; B is overwritten with the solution x.
 * NOTE(review): this redefines solve() from earlier in the file; one of the
 * two definitions must be removed or renamed to link. */
void solve(double *A, double *B,int nA) {
    int NRHS = 1;
    int LDA = nA;
    int LDB = nA;
    int INFO;
    int *IPIV = (int *)malloc(sizeof(int)*nA);
    double *AT = (double *)malloc(sizeof(double)*nA*nA);
    int i,j;
    // transpose A into AT: row-major caller layout -> column-major LAPACK
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) {
            AT[i + nA*j] = A[j + nA*i];
        }
    }
    dgesv_(&nA, &NRHS, AT, &LDA, IPIV, B, &LDB, &INFO);
    SAFE_FREE(AT);
    SAFE_FREE(IPIV);    /* BUG FIX: pivot array was leaked */
    return;
}
/* Solve the complex linear system A.x = B for a single right-hand side using
 * LAPACK ZGESV.  A is copied (transposed into column-major order) so the
 * caller's A is preserved; B is overwritten with the solution x. */
void csolve(double complex *A, double complex *B,int nA) {
    int NRHS = 1;
    int LDA = nA;
    int LDB = nA;
    int INFO;
    int *IPIV = (int *)malloc(sizeof(int)*nA);
    double complex *AT = (double complex *)malloc(sizeof(double complex)*nA*nA);
    int i,j;
    // transpose A into AT: row-major caller layout -> column-major LAPACK
    for(i=0;i<nA;i++) {
        for(j=0;j<nA;j++) {
            AT[i + nA*j] = A[j + nA*i];
        }
    }
    zgesv_(&nA, &NRHS, AT, &LDA, IPIV, B, &LDB, &INFO);
    SAFE_FREE(AT);
    SAFE_FREE(IPIV);    /* BUG FIX: pivot array was leaked */
    return;
}
void cmatvec(double complex *A, double complex *B, double complex *C,
             double complex alpha, double complex beta, int nB)
{
/* Performs \alpha * A.B + \beta * C and stores the output in C.
   A is an nB x nB complex matrix, B and C are complex vectors of length nB.
   This is essentially a wrapper for the ZGEMV BLAS routine.  TRANS='t'
   makes column-major ZGEMV compute A.B for the caller's row-major A. */
    char TRANS = 't';
    int m = nB;
    int n = nB;
    int LDA = nB;
    int INCX = 1;
    int INCY = 1;
    zgemv_(&TRANS, &m,&n,&alpha,A,&LDA,B,&INCX,&beta,C,&INCY);
    return;
}
//
// void normalize_evectors(double complex *evecs) {
// /* Normalize the eigenvectors */
// /* Calculate the factor to normalize the disk eccentricity.
// Each planet eccentricity will then be normalized by the same factor.
// */
// int i,j,indx;
// double norm;
//
//
// #ifdef OPENMP
// #pragma omp parallel private(i,j,norm,indx) shared(evecs,nrows,ncols)
// #pragma omp for schedule(static)
// #endif
// for(i=0;i<nrows;i++) {
//
//
// norm = 0;
// #ifdef NORMALIZE_INT
// for(j=0;j<N;j++) {
// indx = j + ncols*i;
//
// norm += weights[j]*conj(evecs[indx])*evecs[indx];
//
// }
// norm = sqrt(norm);
// #else
// #ifdef NORMALIZE_MAX
// for(j=0;j<N;j++) {
// indx = j+ncols*i;
// // printf("%lg\t%lg\t%lg",norm,abs(evecs[indx]),fmax(norm,abs(evecs[indx])));
// norm = fmax(norm,abs(evecs[indx]));
// }
// if (norm == 0) norm = 1;
//
//
// #endif
// #endif
// for(j=0;j<ncols;j++) {
// indx = j + ncols*i;
// evecs[indx] /= norm;
// }
// }
//
// return;
//
//
// }
/* Normalize each eigenvector, stored as a ROW of the nrows x ncols matrix
 * evecs.  The norm is chosen at compile time:
 *   NORMALIZE_NORM : 2-norm over the first N components
 *   NORMALIZE_INT  : weighted 2-norm using the global weights[] array
 *   NORMALIZE_MAX  : maximum modulus over the first N components
 * A zero norm is replaced by 1 to avoid dividing an all-zero row by zero.
 * Assumes nrows, ncols, N, and weights are file-scope globals -- TODO
 * confirm against the rest of the file. */
void normalize_evectors(double complex *evecs) {
    int i,j,indx;
    double norm;
#ifdef OPENMP
#pragma omp parallel private(i,j,norm,indx) shared(evecs,nrows,ncols)
#pragma omp for schedule(static)
#endif
    for(i=0;i<nrows;i++) {
        norm = 0;
#ifdef NORMALIZE_NORM
        for(j=0;j<N;j++) {
            indx = j +ncols*i;
            /* |z|^2; creal() makes the previously implicit complex->double
             * conversion explicit */
            norm += creal(conj(evecs[indx])*evecs[indx]);
        }
        norm = sqrt(norm);
#else
#ifdef NORMALIZE_INT
        for(j=0;j<N;j++) {
            indx = j + ncols*i;
            norm += weights[j]*creal(conj(evecs[indx])*evecs[indx]);
        }
        norm = sqrt(norm);
#else
#ifdef NORMALIZE_MAX
        for(j=0;j<N;j++) {
            indx = j+ncols*i;
            /* BUG FIX: was abs() -- the integer abs applied to a complex
             * value, which truncates; cabs() gives the modulus */
            norm = fmax(norm,cabs(evecs[indx]));
        }
#endif
#endif
#endif
        if (norm == 0) norm = 1;
        for(j=0;j<ncols;j++) {
            indx = j + ncols*i;
            evecs[indx] /= norm;
        }
    }
    return;
}
void matmat3(double *A, double *B, double *C, double alpha, double beta) {
C[0] = beta*C[0] + alpha*(A[0]*B[0] + A[1]*B[3] + A[2]*B[6]);
C[1] = beta*C[1] + alpha*(A[0]*B[1] + A[1]*B[4] + A[2]*B[7]);
C[2] = beta*C[2] + alpha*(A[0]*B[2] + A[1]*B[5] + A[2]*B[8]);
C[3] = beta*C[3] + alpha*(A[3]*B[0] + A[4]*B[3] + A[5]*B[6]);
C[4] = beta*C[4] + alpha*(A[3]*B[1] + A[4]*B[4] + A[5]*B[7]);
C[5] = beta*C[5] + alpha*(A[3]*B[2] + A[4]*B[5] + A[5]*B[8]);
C[6] = beta*C[6] + alpha*(A[6]*B[0] + A[7]*B[3] + A[8]*B[6]);
C[7] = beta*C[7] + alpha*(A[6]*B[1] + A[7]*B[4] + A[8]*B[7]);
C[8] = beta*C[8] + alpha*(A[6]*B[2] + A[7]*B[5] + A[8]*B[8]);
return;
}
/* C = beta*C + alpha*(A.B) for a 3x3 row-major matrix A and 3-vectors B, C. */
void matvec3(double *A, double *B, double *C, double alpha, double beta) {
    int r;
    for (r = 0; r < 3; r++) {
        C[r] = beta*C[r]
             + alpha*(A[3*r]*B[0] + A[3*r+1]*B[1] + A[3*r+2]*B[2]);
    }
    return;
}
/* C = beta*C + alpha*(A.B) for a 4x4 row-major matrix A and 4-vectors B, C. */
void matvec4(double *A, double *B, double *C, double alpha, double beta) {
    int r;
    for (r = 0; r < 4; r++) {
        C[r] = beta*C[r]
             + alpha*(A[4*r]*B[0] + A[4*r+1]*B[1]
                    + A[4*r+2]*B[2] + A[4*r+3]*B[3]);
    }
    return;
}
/* C = beta*C + alpha*(A.B) for 4x4 row-major matrices (loop form; summation
 * order matches the hand-unrolled original exactly). */
void matmat4(double *A, double *B, double *C, double alpha, double beta) {
    int r, c;
    for (r = 0; r < 4; r++) {
        for (c = 0; c < 4; c++) {
            C[4*r + c] = beta*C[4*r + c]
                + alpha*(A[4*r]*B[c]      + A[4*r+1]*B[4+c]
                       + A[4*r+2]*B[8+c]  + A[4*r+3]*B[12+c]);
        }
    }
    return;
}
/* C = beta*C + alpha*(A.B) for 3x3 row-major complex matrices (loop form;
 * summation order matches the hand-unrolled original exactly). */
void cmatmat3(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
    int r, c, k;
    double complex acc;
    for (r = 0; r < 3; r++) {
        for (c = 0; c < 3; c++) {
            acc = 0;
            for (k = 0; k < 3; k++) {
                acc += A[3*r + k] * B[3*k + c];
            }
            C[3*r + c] = beta*C[3*r + c] + alpha*acc;
        }
    }
    return;
}
/* C = beta*C + alpha*(A.B) for a 3x3 row-major complex matrix A and complex
 * 3-vectors B, C. */
void cmatvec3(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
    int r;
    for (r = 0; r < 3; r++) {
        C[r] = beta*C[r]
             + alpha*(A[3*r]*B[0] + A[3*r+1]*B[1] + A[3*r+2]*B[2]);
    }
    return;
}
/* C = beta*C + alpha*(A.B) for a 4x4 row-major complex matrix A and complex
 * 4-vectors B, C. */
void cmatvec4(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
    int r;
    for (r = 0; r < 4; r++) {
        C[r] = beta*C[r]
             + alpha*(A[4*r]*B[0] + A[4*r+1]*B[1]
                    + A[4*r+2]*B[2] + A[4*r+3]*B[3]);
    }
    return;
}
/* C = beta*C + alpha*(A.B) for 4x4 row-major complex matrices (loop form;
 * summation order matches the hand-unrolled original exactly). */
void cmatmat4(double complex *A, double complex *B, double complex *C, double complex alpha, double complex beta) {
    int r, c;
    for (r = 0; r < 4; r++) {
        for (c = 0; c < 4; c++) {
            C[4*r + c] = beta*C[4*r + c]
                + alpha*(A[4*r]*B[c]      + A[4*r+1]*B[4+c]
                       + A[4*r+2]*B[8+c]  + A[4*r+3]*B[12+c]);
        }
    }
    return;
}
|
GB_binop__isne_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isne_fp64
// A.*B function (eWiseMult): GB_AemultB__isne_fp64
// A*D function (colscale): GB_AxD__isne_fp64
// D*A function (rowscale): GB_DxB__isne_fp64
// C+=B function (dense accum): GB_Cdense_accumB__isne_fp64
// C+=b function (dense accum): GB_Cdense_accumb__isne_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_fp64
// C=scalar+B GB_bind1st__isne_fp64
// C=scalar+B' GB_bind1st_tran__isne_fp64
// C=A+scalar GB_bind2nd__isne_fp64
// C=A'+scalar GB_bind2nd_tran__isne_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_FP64 || GxB_NO_ISNE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The work is done
// by the included template using the GB_* macros defined above.  Returns
// GrB_NO_VALUE when this operator/type pair is disabled (GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__isne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B is sliced by entry (ek-slice)
// into ntasks tasks described by the three slice arrays.  The work is done
// by the included template.
GrB_Info GB_Cdense_accumB__isne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,    // first vector of each task
    const int64_t *GB_RESTRICT klast_slice,     // last vector of each task
    const int64_t *GB_RESTRICT pstart_slice,    // first entry of each task
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, passed as an untyped pointer
// and typecast to double here.
GrB_Info GB_Cdense_accumb__isne_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the return above always fires first
    // (harmless dead code in this auto-generated file).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale, where D is a diagonal matrix; A is sliced by entry
// (ek-slice) across the tasks.  The work is done by the included template.
GrB_Info GB_AxD__isne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,      // if true, values of A ignored
    const GrB_Matrix D, bool D_is_pattern,      // if true, values of D ignored
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale, where D is a diagonal matrix.  The work is done by the
// included template.
GrB_Info GB_DxB__isne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,      // if true, values of D ignored
    const GrB_Matrix B, bool B_is_pattern,      // if true, values of B ignored
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B.  The ek-slice workspaces below are
// presumably allocated inside the template and released by GB_FREE_ALL --
// confirm against GB_add_template.c.
GrB_Info GB_AaddB__isne_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // use only the pattern of M
    const bool Mask_comp,               // use the complement of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                // true if C->h is a copy of M->h
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B.  The ek-slice workspaces below are
// presumably allocated inside the template and released by GB_FREE_ALL --
// confirm against GB_emult_template.c.
GrB_Info GB_AemultB__isne_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask
    const bool Mask_struct,             // use only the pattern of M
    const bool Mask_comp,               // use the complement of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for all p, with the scalar x bound as the first
// operand.  Bb is B->b when B is bitmap (NULL otherwise); entries absent
// from the bitmap are skipped.  The ISNE result (0 or 1) is stored as the
// C type, double.
GrB_Info GB_bind1st__isne_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for all p, with the scalar y bound as the second
// operand.  Ab is A->b when A is bitmap (NULL otherwise); entries absent
// from the bitmap are skipped.
GrB_Info GB_bind2nd__isne_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        double aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x bound
// as the first operand; the per-entry work is GB_CAST_OP (redefined above),
// applied inside the included transpose template.
GrB_Info GB_bind1st_tran__isne_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y bound
// as the second operand; the per-entry work is GB_CAST_OP (redefined above),
// applied inside the included transpose template.
GrB_Info GB_bind2nd_tran__isne_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
OpenMP.c | /*
OpenMP.c
This file contains a parallel implementation of the imperative bitonic sort,
using OpenMP for parallel loops.
*/
/*
------- ----------------------
Brouzos Rafael rnm1816@gmail.com www.github.com/bronzeRaf
-----------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
//global variables
struct timeval startwtime, endwtime;
double seq_time;
int *a;
int N;
int NUM_THREADS;
//declare functions
void print(void);
void test(void);
inline void exchange(int i, int j);
void PimpBitonicSort(void);
void init(void);
void impBitonicSort(void);
int desc( const void *a, const void *b );
int asc( const void *a, const void *b );
/**
  Entry point.  Usage: prog q p, sorting n = 2^q random ints with 2^p
  threads.  Times and verifies three sorts on the same data: parallel
  bitonic (OpenMP), serial imperative bitonic, and the C library qsort.
**/
int main(int argc, char **argv){
  //check arguments: both exponents are required
  if (argc != 3) {
    // BUG FIX: the usage message did not mention the required second
    // argument p (thread-count exponent)
    printf("Usage: %s q p\n where n=2^q is problem size and 2^p is the number of threads (both powers of two)\n",
           argv[0]);
    exit(1);
  }
  //N=2^q
  N = 1<<atoi(argv[1]);
  //NUM_THREADS=2^p
  NUM_THREADS = 1<<atoi(argv[2]);
  //memory allocation, checked (N may be huge)
  a = (int *) malloc(N * sizeof(int));
  if (a == NULL) {
    printf("Failed to allocate %d elements\n", N);
    exit(1);
  }
  //fire up the parallel algorithm
  init();
  gettimeofday (&startwtime, NULL);
  PimpBitonicSort();
  gettimeofday (&endwtime, NULL);
  seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
                      + endwtime.tv_sec - startwtime.tv_sec);
  printf("Parallel (openmp) Imperative wall clock time = %f\n", seq_time);
  test();
  //fire up the serial algorithm
  init();
  gettimeofday (&startwtime, NULL);
  impBitonicSort();
  gettimeofday (&endwtime, NULL);
  seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
                      + endwtime.tv_sec - startwtime.tv_sec);
  printf("Imperative wall clock time = %f\n", seq_time);
  test();
  //reference: the C library quicksort
  init();
  gettimeofday (&startwtime, NULL);
  qsort(a,N,sizeof(int),asc);
  gettimeofday (&endwtime, NULL);
  seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
                      + endwtime.tv_sec - startwtime.tv_sec);
  printf("Quick sort wall clock time = %f\n", seq_time);
  test();
  free(a);
  return 0;
}
/**
parallel imperative version of bitonic sort with OMP
source: https://github.com/Theodosis/parallel-programming/blob/master/pthreads/bitonic/pbitonic.c
**/
/**
 * Parallel imperative bitonic sort of the global array "a" (length N, a power
 * of two) using OpenMP.  The innermost compare-exchange sweep is data-parallel:
 * iteration idx touches only the pair (idx, idx^dist) and only when the
 * partner index is larger, so each pair is handled by exactly one iteration.
 */
void PimpBitonicSort() {
    omp_set_num_threads( NUM_THREADS );
    int stage, dist, idx;
    stage = 0;
    for (stage = 2; stage <= N; stage *= 2) {
        for (dist = stage >> 1; dist > 0; dist >>= 1) {
            #pragma omp parallel for
            for (idx = 0; idx < N; idx++) {
                int partner = idx ^ dist;
                if (partner > idx) {
                    //(idx & stage)==0 selects the ascending half of the stage
                    if ((idx & stage) == 0 && a[idx] > a[partner]) {
                        exchange(idx, partner);
                    }
                    if ((idx & stage) != 0 && a[idx] < a[partner]) {
                        exchange(idx, partner);
                    }
                }
            }
        }
    }
}
/**
imperative version of bitonic sort
**/
/**
 * Serial imperative bitonic sort of the global array "a" (length N, a power
 * of two).  Outer loop grows the sorted-block size (stage); the inner loop
 * sweeps compare-exchange distances down from stage/2 to 1.
 */
void impBitonicSort() {
    int stage, dist, idx;
    for (stage = 2; stage <= N; stage <<= 1) {
        for (dist = stage >> 1; dist > 0; dist >>= 1) {
            for (idx = 0; idx < N; idx++) {
                int partner = idx ^ dist;
                //act once per pair: only the element with the smaller index
                if (partner > idx) {
                    //ascending half of the stage
                    if ((idx & stage) == 0 && a[idx] > a[partner])
                        exchange(idx, partner);
                    //descending half of the stage
                    if ((idx & stage) != 0 && a[idx] < a[partner])
                        exchange(idx, partner);
                }
            }
        }
    }
}
/** -------------- SUB-PROCEDURES ----------------- **/
/** procedure init() : initialize array "a" with data **/
/** procedure init() : fill the global array "a" with N pseudo-random values in [0, N). */
void init() {
    int idx;
    for (idx = 0; idx < N; idx++)
        a[idx] = rand() % N; // (N - idx);
}
/** INLINE procedure exchange() : pair swap **/
/**
 * procedure exchange() : swap a[i] and a[j] in the global array.
 * NOTE: the `inline` keyword was dropped from this definition.  Under
 * C99/C11 inline semantics, a function whose every declaration in the
 * translation unit is plain `inline` (no `extern`, no `static`) emits NO
 * external definition, which can produce undefined-reference link errors
 * (e.g. at -O0).  A non-inline definition paired with the existing
 * `inline` prototype provides the required external definition.
 */
void exchange(int i, int j) {
    int t;
    t = a[i];
    a[i] = a[j];
    a[j] = t;
}
/** procedure print() : print array elements **/
/** procedure print() : write each element of the global array "a" on its own line. */
void print() {
    int idx;
    for (idx = 0; idx < N; idx++)
        printf("%d\n", a[idx]);
    printf("\n");
}
/** procedure test() : verify sort results **/
/** procedure test() : check that the global array "a" is in non-decreasing order and report. */
void test() {
    int pass = 1;
    int idx;
    for (idx = 1; idx < N; idx++) {
        //a single out-of-order adjacent pair clears the flag for good
        if (a[idx-1] > a[idx])
            pass = 0;
    }
    printf(" TEST %s\n",(pass) ? "PASSed" : "FAILed");
}
/**
descending compare function for qsort
source: https://github.com/Theodosis/parallel-programming/blob/master/pthreads/bitonic/pbitonic.c
**/
/**
 * descending compare function for qsort: negative when *a > *b,
 * zero when equal, positive otherwise.
 */
int desc( const void *a, const void *b ){
    const int lhs = *(const int *)a;
    const int rhs = *(const int *)b;
    if (lhs > rhs) return -1;
    if (lhs < rhs) return 1;
    return 0;
}
/**
ascending compare function for qsort
source: https://github.com/Theodosis/parallel-programming/blob/master/pthreads/bitonic/pbitonic.c
**/
/**
 * ascending compare function for qsort: negative when *a < *b,
 * zero when equal, positive otherwise.
 */
int asc( const void *a, const void *b ){
    const int lhs = *(const int *)a;
    const int rhs = *(const int *)b;
    if (lhs < rhs) return -1;
    if (lhs > rhs) return 1;
    return 0;
}
|
hyantes_para.c | /* Generated by Cython 0.15.1 on Mon Feb 11 15:34:12 2013 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#else
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#if PY_VERSION_HEX < 0x02040000
#define METH_COEXIST 0
#define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
#define PyDict_Contains(d,o) PySequence_Contains(d,o)
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o)
#define PyNumber_Index(o) PyNumber_Int(o)
#define PyIndex_Check(o) PyNumber_Check(o)
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
#define PyVarObject_HEAD_INIT(type, size) \
PyObject_HEAD_INIT(type) size,
#define PyType_Modified(t)
typedef struct {
void *buf;
PyObject *obj;
Py_ssize_t len;
Py_ssize_t itemsize;
int readonly;
int ndim;
char *format;
Py_ssize_t *shape;
Py_ssize_t *strides;
Py_ssize_t *suboffsets;
void *internal;
} Py_buffer;
#define PyBUF_SIMPLE 0
#define PyBUF_WRITABLE 0x0001
#define PyBUF_FORMAT 0x0004
#define PyBUF_ND 0x0008
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#endif
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyBytesObject PyStringObject
#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytes_CheckExact PyString_CheckExact
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_DecodeEscape PyString_DecodeEscape
#define PyBytes_AsString PyString_AsString
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_Size PyString_Size
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Repr PyString_Repr
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x03020000
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
#define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
#define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
#define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
#else
#define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
#define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
#define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
#else
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_NAMESTR(n) ((char *)(n))
#define __Pyx_DOCSTR(n) ((char *)(n))
#else
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__chyantes_omp
#define __PYX_HAVE_API__chyantes_omp
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
/* inline attribute */
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
/* unused attribute */
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || defined(__INTEL_COMPILER)
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
/* Type Conversion Predeclarations */
#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#ifdef __GNUC__
/* Test for GCC > 2.95 */
#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ > 2 ... */
#else /* __GNUC__ */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"hyantes_para.pyx",
"numpy.pxd",
};
/* "numpy.pxd":719
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "numpy.pxd":720
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "numpy.pxd":721
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "numpy.pxd":722
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "numpy.pxd":726
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "numpy.pxd":727
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "numpy.pxd":728
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "numpy.pxd":729
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "numpy.pxd":733
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "numpy.pxd":734
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "numpy.pxd":743
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "numpy.pxd":744
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "numpy.pxd":745
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "numpy.pxd":747
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "numpy.pxd":748
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "numpy.pxd":749
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "numpy.pxd":751
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "numpy.pxd":752
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "numpy.pxd":754
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "numpy.pxd":755
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "numpy.pxd":756
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "hyantes_para.pyx":10
*
* DTYPE = np.double
* ctypedef np.double_t DTYPE_t # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
typedef __pyx_t_5numpy_double_t __pyx_t_12chyantes_omp_DTYPE_t;
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "numpy.pxd":758
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "numpy.pxd":759
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "numpy.pxd":760
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "numpy.pxd":762
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name, PyObject* kw_name); /*proto*/
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact); /*proto*/
/* Run-time type information about structs used with buffers */
struct __Pyx_StructField_;
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
#include <string.h>
void __pyx_init_nan(void);
static float __PYX_NAN;
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
Py_ssize_t __Pyx_zeros[] = {0, 0};
Py_ssize_t __Pyx_minusones[] = {-1, -1};
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
#ifndef __PYX_FORCE_INIT_THREADS
#if PY_VERSION_HEX < 0x02040200
#define __PYX_FORCE_INIT_THREADS 1
#else
#define __PYX_FORCE_INIT_THREADS 0
#endif
#endif
static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t);
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
static int __Pyx_check_binary_version(void);
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/
static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
int __pyx_lineno, const char *__pyx_filename); /*proto*/
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
/* Module declarations from 'libc.math' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/
/* Module declarations from 'cython.cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'chyantes_omp' */
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_12chyantes_omp_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_12chyantes_omp_DTYPE_t), 'R' };
#define __Pyx_MODULE_NAME "chyantes_omp"
int __pyx_module_is_main_chyantes_omp = 0;
/* Implementation of 'chyantes_omp' */
static PyObject *__pyx_builtin_xrange;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static char __pyx_k_1[] = "ndarray is not C contiguous";
static char __pyx_k_3[] = "ndarray is not Fortran contiguous";
static char __pyx_k_5[] = "Non-native byte order not supported";
static char __pyx_k_7[] = "unknown dtype code in numpy.pxd (%d)";
static char __pyx_k_8[] = "Format string allocated too short, see comment in numpy.pxd";
static char __pyx_k_11[] = "Format string allocated too short.";
static char __pyx_k__B[] = "B";
static char __pyx_k__H[] = "H";
static char __pyx_k__I[] = "I";
static char __pyx_k__L[] = "L";
static char __pyx_k__O[] = "O";
static char __pyx_k__Q[] = "Q";
static char __pyx_k__b[] = "b";
static char __pyx_k__d[] = "d";
static char __pyx_k__f[] = "f";
static char __pyx_k__g[] = "g";
static char __pyx_k__h[] = "h";
static char __pyx_k__i[] = "i";
static char __pyx_k__l[] = "l";
static char __pyx_k__q[] = "q";
static char __pyx_k__t[] = "t";
static char __pyx_k__Zd[] = "Zd";
static char __pyx_k__Zf[] = "Zf";
static char __pyx_k__Zg[] = "Zg";
static char __pyx_k__np[] = "np";
static char __pyx_k__run[] = "run";
static char __pyx_k__step[] = "step";
static char __pyx_k__xmax[] = "xmax";
static char __pyx_k__xmin[] = "xmin";
static char __pyx_k__ymax[] = "ymax";
static char __pyx_k__ymin[] = "ymin";
static char __pyx_k__DTYPE[] = "DTYPE";
static char __pyx_k__numpy[] = "numpy";
static char __pyx_k__range[] = "range";
static char __pyx_k__zeros[] = "zeros";
static char __pyx_k__double[] = "double";
static char __pyx_k__range_[] = "range_";
static char __pyx_k__xrange[] = "xrange";
static char __pyx_k__range_x[] = "range_x";
static char __pyx_k__range_y[] = "range_y";
static char __pyx_k____main__[] = "__main__";
static char __pyx_k____test__[] = "__test__";
static char __pyx_k__ValueError[] = "ValueError";
static char __pyx_k__RuntimeError[] = "RuntimeError";
static char __pyx_k__chyantes_omp[] = "chyantes_omp";
static PyObject *__pyx_kp_u_1;
static PyObject *__pyx_kp_u_11;
static PyObject *__pyx_kp_u_3;
static PyObject *__pyx_kp_u_5;
static PyObject *__pyx_kp_u_7;
static PyObject *__pyx_kp_u_8;
static PyObject *__pyx_n_s__DTYPE;
static PyObject *__pyx_n_s__RuntimeError;
static PyObject *__pyx_n_s__ValueError;
static PyObject *__pyx_n_s____main__;
static PyObject *__pyx_n_s____test__;
static PyObject *__pyx_n_s__chyantes_omp;
static PyObject *__pyx_n_s__double;
static PyObject *__pyx_n_s__np;
static PyObject *__pyx_n_s__numpy;
static PyObject *__pyx_n_s__range;
static PyObject *__pyx_n_s__range_;
static PyObject *__pyx_n_s__range_x;
static PyObject *__pyx_n_s__range_y;
static PyObject *__pyx_n_s__run;
static PyObject *__pyx_n_s__step;
static PyObject *__pyx_n_s__t;
static PyObject *__pyx_n_s__xmax;
static PyObject *__pyx_n_s__xmin;
static PyObject *__pyx_n_s__xrange;
static PyObject *__pyx_n_s__ymax;
static PyObject *__pyx_n_s__ymin;
static PyObject *__pyx_n_s__zeros;
static PyObject *__pyx_int_15;
static PyObject *__pyx_k_tuple_2;
static PyObject *__pyx_k_tuple_4;
static PyObject *__pyx_k_tuple_6;
static PyObject *__pyx_k_tuple_9;
static PyObject *__pyx_k_tuple_10;
static PyObject *__pyx_k_tuple_12;
/* "hyantes_para.pyx":14
* @cython.boundscheck(False)
* @cython.cdivision(True)
* def run(double xmin, double ymin, double xmax, double ymax, double step, double range_, int range_x, int range_y, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] t): # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] pt = np.zeros((range_x, range_y))
* cdef int i,j,k
*/
static PyObject *__pyx_pf_12chyantes_omp_run(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_12chyantes_omp_run = {__Pyx_NAMESTR("run"), (PyCFunction)__pyx_pf_12chyantes_omp_run, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
/*
 * Cython-generated wrapper for the Python function:
 *
 *   run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t)
 *
 * (declared in hyantes_para.pyx, line 14).  Computes a 2-D output array
 * `pt` of shape (range_x, range_y): for every grid point (xi, yj) =
 * (xmin + step*i, ymin + step*j) it accumulates, over each row k of the
 * input ndarray `t`, the value t[k,2] / (1 + tmp) whenever the
 * great-circle-style distance
 *   tmp = 6368 * acos(cos(xi)*cos(t[k,0])*cos(yj - t[k,1]) + sin(xi)*sin(t[k,0]))
 * is below `range_`.  The outer i-loop is a Cython prange, executed under
 * "#pragma omp for" with the GIL released.
 *
 * NOTE(review): rows of `t` appear to be (latitude, longitude, weight)
 * in radians with Earth radius 6368 km — inferred from the formula only;
 * confirm against the .pyx author's intent.
 *
 * Parameters (unpacked from __pyx_args/__pyx_kwds):
 *   xmin..ymax, step, range_ : double scalars
 *   range_x, range_y         : int grid dimensions
 *   t                        : 2-D np.ndarray[DTYPE_t] (buffer-checked)
 * Returns: new reference to the freshly allocated `pt` ndarray, or NULL
 * with a Python exception set on failure.
 *
 * DO NOT EDIT BY HAND: this file is produced by the Cython compiler;
 * changes belong in hyantes_para.pyx.
 */
static PyObject *__pyx_pf_12chyantes_omp_run(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* C shadows of the nine Python-level arguments */
double __pyx_v_xmin;
double __pyx_v_ymin;
double __pyx_v_xmax;
double __pyx_v_ymax;
double __pyx_v_step;
double __pyx_v_range_;
int __pyx_v_range_x;
int __pyx_v_range_y;
PyArrayObject *__pyx_v_t = 0;
/* Locals from the .pyx body: output array, loop indices, scratch scalars */
PyArrayObject *__pyx_v_pt = 0;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
double __pyx_v_tmp;
double __pyx_v_xi;
double __pyx_v_yj;
/* Buffer-protocol views of pt and t; strides/shapes cached for fast
 * __Pyx_BufPtrStrided2d element access inside the hot loops. */
Py_buffer __pyx_bstruct_pt;
Py_ssize_t __pyx_bstride_0_pt = 0;
Py_ssize_t __pyx_bstride_1_pt = 0;
Py_ssize_t __pyx_bshape_0_pt = 0;
Py_ssize_t __pyx_bshape_1_pt = 0;
Py_buffer __pyx_bstruct_t;
Py_ssize_t __pyx_bstride_0_t = 0;
Py_ssize_t __pyx_bstride_1_t = 0;
Py_ssize_t __pyx_bshape_0_t = 0;
Py_ssize_t __pyx_bshape_1_t = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
/* Numbered temporaries generated by Cython; __pyx_t_6..__pyx_t_23 are
 * loop bounds/indices that the OpenMP pragma below privatizes per thread. */
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyArrayObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
npy_intp __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
long __pyx_t_14;
int __pyx_t_15;
long __pyx_t_16;
int __pyx_t_17;
long __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
long __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__xmin,&__pyx_n_s__ymin,&__pyx_n_s__xmax,&__pyx_n_s__ymax,&__pyx_n_s__step,&__pyx_n_s__range_,&__pyx_n_s__range_x,&__pyx_n_s__range_y,&__pyx_n_s__t,0};
__Pyx_RefNannySetupContext("run");
__pyx_self = __pyx_self;
/* ---- Argument unpacking: accept 9 positional and/or keyword args.
 * The fall-through switch fills `values[]` from positionals, then any
 * missing slots from keywords; each missing required arg raises
 * "run() takes exactly 9 arguments". ---- */
{
PyObject* values[9] = {0,0,0,0,0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xmin);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ymin);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__xmax);
if (likely(values[2])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__ymax);
if (likely(values[3])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 4:
values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__step);
if (likely(values[4])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 5:
values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__range_);
if (likely(values[5])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 6:
values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__range_x);
if (likely(values[6])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 7:
values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__range_y);
if (likely(values[7])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 8:
values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__t);
if (likely(values[8])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
/* Leftover keywords are either duplicates or unknown names */
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "run") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 9) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
}
/* Convert each PyObject* to its declared C type; conversion failure
 * (sentinel value + pending exception) jumps to the traceback path. */
__pyx_v_xmin = __pyx_PyFloat_AsDouble(values[0]); if (unlikely((__pyx_v_xmin == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_ymin = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_ymin == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_xmax = __pyx_PyFloat_AsDouble(values[2]); if (unlikely((__pyx_v_xmax == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_ymax = __pyx_PyFloat_AsDouble(values[3]); if (unlikely((__pyx_v_ymax == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_step = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_step == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_range_ = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_range_ == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_range_x = __Pyx_PyInt_AsInt(values[6]); if (unlikely((__pyx_v_range_x == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_range_y = __Pyx_PyInt_AsInt(values[7]); if (unlikely((__pyx_v_range_y == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_t = ((PyArrayObject *)values[8]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("run", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("chyantes_omp.run", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* ---- Type-check `t` and acquire a validated 2-D buffer view of it.
 * buf pointers start NULL so the error path can release safely. ---- */
__pyx_bstruct_pt.buf = NULL;
__pyx_bstruct_t.buf = NULL;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_t), __pyx_ptype_5numpy_ndarray, 1, "t", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_t, (PyObject*)__pyx_v_t, &__Pyx_TypeInfo_nn___pyx_t_12chyantes_omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_t = __pyx_bstruct_t.strides[0]; __pyx_bstride_1_t = __pyx_bstruct_t.strides[1];
__pyx_bshape_0_t = __pyx_bstruct_t.shape[0]; __pyx_bshape_1_t = __pyx_bstruct_t.shape[1];
/* "hyantes_para.pyx":15
 * @cython.cdivision(True)
 * def run(double xmin, double ymin, double xmax, double ymax, double step, double range_, int range_x, int range_y, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] t):
 *     cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] pt = np.zeros((range_x, range_y))             # <<<<<<<<<<<<<<
 *     cdef int i,j,k
 *     cdef double tmp, xi, yj
 */
/* ---- Allocate the output: pt = np.zeros((range_x, range_y)),
 * then acquire a writable buffer view of it. ---- */
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromLong(__pyx_v_range_x); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromLong(__pyx_v_range_y); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4));
__Pyx_GIVEREF(((PyObject *)__pyx_t_4));
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_pt, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_12chyantes_omp_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
/* On buffer failure pt is set to None so the cleanup path stays valid */
__pyx_v_pt = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_pt.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_bstride_0_pt = __pyx_bstruct_pt.strides[0]; __pyx_bstride_1_pt = __pyx_bstruct_pt.strides[1];
__pyx_bshape_0_pt = __pyx_bstruct_pt.shape[0]; __pyx_bshape_1_pt = __pyx_bstruct_pt.shape[1];
}
}
__pyx_t_5 = 0;
__pyx_v_pt = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "hyantes_para.pyx":19
 *     cdef double tmp, xi, yj
 *     "omp parallel for private(j,k,tmp)"
 *     with nogil, parallel():             # <<<<<<<<<<<<<<
 *         for i in prange(range_x):
 *             for j in xrange(range_y):
 */
/* ---- Parallel region: GIL released (Py_UNBLOCK_THREADS); the numbered
 * temporaries are thread-private, and i/j/k/tmp/xi/yj are first/last-
 * private per the generated "omp for" clauses below. No Python API calls
 * are made inside this region. ---- */
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_7, __pyx_t_18, __pyx_t_6, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_8, __pyx_t_22, __pyx_t_19, __pyx_t_16, __pyx_t_21, __pyx_t_11, __pyx_t_23, __pyx_t_13, __pyx_t_14, __pyx_t_12, __pyx_t_17, __pyx_t_20)
#endif /* _OPENMP */
{
/* "hyantes_para.pyx":20
 *     "omp parallel for private(j,k,tmp)"
 *     with nogil, parallel():
 *         for i in prange(range_x):             # <<<<<<<<<<<<<<
 *             for j in xrange(range_y):
 *                 xi = xmin+step*i
 */
__pyx_t_6 = __pyx_v_range_x;
if (1 == 0) abort();
{
/* prange iteration count for step 1: (range_x - 0) / 1 */
__pyx_t_8 = (__pyx_t_6 - 0) / 1;
if (__pyx_t_8 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_yj) lastprivate(__pyx_v_xi) lastprivate(__pyx_v_k) lastprivate(__pyx_v_j) lastprivate(__pyx_v_tmp) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){
{
__pyx_v_i = 0 + 1 * __pyx_t_7;
/* Initialize private variables to invalid values */
__pyx_v_yj = ((double)__PYX_NAN);
__pyx_v_xi = ((double)__PYX_NAN);
__pyx_v_k = ((int)0xbad0bad0);
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_tmp = ((double)__PYX_NAN);
/* "hyantes_para.pyx":21
 *     with nogil, parallel():
 *         for i in prange(range_x):
 *             for j in xrange(range_y):             # <<<<<<<<<<<<<<
 *                 xi = xmin+step*i
 *                 yj = ymin+step*j
 */
__pyx_t_9 = __pyx_v_range_y;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_j = __pyx_t_10;
/* "hyantes_para.pyx":22
 *         for i in prange(range_x):
 *             for j in xrange(range_y):
 *                 xi = xmin+step*i             # <<<<<<<<<<<<<<
 *                 yj = ymin+step*j
 *                 for k in xrange(t.shape[0]):
 */
__pyx_v_xi = (__pyx_v_xmin + (__pyx_v_step * __pyx_v_i));
/* "hyantes_para.pyx":23
 *             for j in xrange(range_y):
 *                 xi = xmin+step*i
 *                 yj = ymin+step*j             # <<<<<<<<<<<<<<
 *                 for k in xrange(t.shape[0]):
 *                     tmp = 6368.* math.acos( math.cos(xi)*math.cos( t[k,0] ) * math.cos(yj-t[k,1])+ math.sin(xi)*math.sin(t[k,0]))
 */
__pyx_v_yj = (__pyx_v_ymin + (__pyx_v_step * __pyx_v_j));
/* "hyantes_para.pyx":24
 *                 xi = xmin+step*i
 *                 yj = ymin+step*j
 *                 for k in xrange(t.shape[0]):             # <<<<<<<<<<<<<<
 *                     tmp = 6368.* math.acos( math.cos(xi)*math.cos( t[k,0] ) * math.cos(yj-t[k,1])+ math.sin(xi)*math.sin(t[k,0]))
 *                     if tmp < range_:
 */
__pyx_t_11 = (__pyx_v_t->dimensions[0]);
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_k = __pyx_t_12;
/* "hyantes_para.pyx":25
 *                 yj = ymin+step*j
 *                 for k in xrange(t.shape[0]):
 *                     tmp = 6368.* math.acos( math.cos(xi)*math.cos( t[k,0] ) * math.cos(yj-t[k,1])+ math.sin(xi)*math.sin(t[k,0]))             # <<<<<<<<<<<<<<
 *                     if tmp < range_:
 *                         pt[i,j]+=t[k,2] / (1+tmp)
 */
/* Strided reads of t[k,0], t[k,1], t[k,0]; distance formula uses the
 * C library acos/cos/sin since the GIL is not held. */
__pyx_t_13 = __pyx_v_k;
__pyx_t_14 = 0;
__pyx_t_15 = __pyx_v_k;
__pyx_t_16 = 1;
__pyx_t_17 = __pyx_v_k;
__pyx_t_18 = 0;
__pyx_v_tmp = (6368. * acos((((cos(__pyx_v_xi) * cos((*__Pyx_BufPtrStrided2d(__pyx_t_12chyantes_omp_DTYPE_t *, __pyx_bstruct_t.buf, __pyx_t_13, __pyx_bstride_0_t, __pyx_t_14, __pyx_bstride_1_t)))) * cos((__pyx_v_yj - (*__Pyx_BufPtrStrided2d(__pyx_t_12chyantes_omp_DTYPE_t *, __pyx_bstruct_t.buf, __pyx_t_15, __pyx_bstride_0_t, __pyx_t_16, __pyx_bstride_1_t))))) + (sin(__pyx_v_xi) * sin((*__Pyx_BufPtrStrided2d(__pyx_t_12chyantes_omp_DTYPE_t *, __pyx_bstruct_t.buf, __pyx_t_17, __pyx_bstride_0_t, __pyx_t_18, __pyx_bstride_1_t)))))));
/* "hyantes_para.pyx":26
 *                 for k in xrange(t.shape[0]):
 *                     tmp = 6368.* math.acos( math.cos(xi)*math.cos( t[k,0] ) * math.cos(yj-t[k,1])+ math.sin(xi)*math.sin(t[k,0]))
 *                     if tmp < range_:             # <<<<<<<<<<<<<<
 *                         pt[i,j]+=t[k,2] / (1+tmp)
 * return pt
 */
__pyx_t_19 = (__pyx_v_tmp < __pyx_v_range_);
if (__pyx_t_19) {
/* "hyantes_para.pyx":27
 *                     tmp = 6368.* math.acos( math.cos(xi)*math.cos( t[k,0] ) * math.cos(yj-t[k,1])+ math.sin(xi)*math.sin(t[k,0]))
 *                     if tmp < range_:
 *                         pt[i,j]+=t[k,2] / (1+tmp)             # <<<<<<<<<<<<<<
 * return pt
 */
/* Accumulate into pt[i,j]: race-free because each (i,j) cell is owned
 * by exactly one thread (i comes from the omp-for partition). */
__pyx_t_20 = __pyx_v_k;
__pyx_t_21 = 2;
__pyx_t_22 = __pyx_v_i;
__pyx_t_23 = __pyx_v_j;
*__Pyx_BufPtrStrided2d(__pyx_t_12chyantes_omp_DTYPE_t *, __pyx_bstruct_pt.buf, __pyx_t_22, __pyx_bstride_0_pt, __pyx_t_23, __pyx_bstride_1_pt) += ((*__Pyx_BufPtrStrided2d(__pyx_t_12chyantes_omp_DTYPE_t *, __pyx_bstruct_t.buf, __pyx_t_20, __pyx_bstride_0_t, __pyx_t_21, __pyx_bstride_1_t)) / (1.0 + __pyx_v_tmp));
goto __pyx_L21;
}
__pyx_L21:;
}
}
}
}
}
}
}
}
}
/* "hyantes_para.pyx":19
 *     cdef double tmp, xi, yj
 *     "omp parallel for private(j,k,tmp)"
 *     with nogil, parallel():             # <<<<<<<<<<<<<<
 *         for i in prange(range_x):
 *             for j in xrange(range_y):
 */
/*finally:*/ {
/* Re-acquire the GIL before touching any Python objects again */
Py_BLOCK_THREADS
}
}
/* "hyantes_para.pyx":28
 *                     if tmp < range_:
 *                         pt[i,j]+=t[k,2] / (1+tmp)
 * return pt             # <<<<<<<<<<<<<<
 */
/* ---- Return pt (new reference to caller). The implicit "return None"
 * path below is unreachable after the explicit return. ---- */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_pt));
__pyx_r = ((PyObject *)__pyx_v_pt);
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* ---- Error path: drop temporaries, release both buffer views while
 * preserving the pending exception, record the traceback. ---- */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_pt);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_t);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("chyantes_omp.run", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
/* ---- Success path: release buffer views (no exception pending). ---- */
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_pt);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_t);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_pt);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":190
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__");
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "numpy.pxd":196
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = (__pyx_v_info == NULL);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":199
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "numpy.pxd":200
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "numpy.pxd":202
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self));
/* "numpy.pxd":204
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t)));
if (__pyx_t_1) {
/* "numpy.pxd":205
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
goto __pyx_L6;
}
/*else*/ {
/* "numpy.pxd":207
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_copy_shape = 0;
}
__pyx_L6:;
/* "numpy.pxd":209
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS);
if (__pyx_t_1) {
/* "numpy.pxd":210
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS));
__pyx_t_3 = __pyx_t_2;
} else {
__pyx_t_3 = __pyx_t_1;
}
if (__pyx_t_3) {
/* "numpy.pxd":211
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
/* "numpy.pxd":213
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS);
if (__pyx_t_3) {
/* "numpy.pxd":214
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS));
__pyx_t_2 = __pyx_t_1;
} else {
__pyx_t_2 = __pyx_t_3;
}
if (__pyx_t_2) {
/* "numpy.pxd":215
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
/* "numpy.pxd":217
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self));
/* "numpy.pxd":218
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "numpy.pxd":219
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
if (__pyx_v_copy_shape) {
/* "numpy.pxd":222
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "numpy.pxd":223
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "numpy.pxd":224
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "numpy.pxd":225
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]);
/* "numpy.pxd":226
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]);
}
goto __pyx_L9;
}
/*else*/ {
/* "numpy.pxd":228
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self)));
/* "numpy.pxd":229
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self)));
}
__pyx_L9:;
/* "numpy.pxd":230
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "numpy.pxd":231
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self));
/* "numpy.pxd":232
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self)));
/* "numpy.pxd":235
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef list stack
*/
__pyx_v_f = NULL;
/* "numpy.pxd":236
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef list stack
* cdef int offset
*/
__Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr));
__pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr;
/* "numpy.pxd":240
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "numpy.pxd":242
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = (!__pyx_v_hasfields);
if (__pyx_t_2) {
__pyx_t_3 = (!__pyx_v_copy_shape);
__pyx_t_1 = __pyx_t_3;
} else {
__pyx_t_1 = __pyx_t_2;
}
if (__pyx_t_1) {
/* "numpy.pxd":244
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
goto __pyx_L12;
}
/*else*/ {
/* "numpy.pxd":247
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
__Pyx_INCREF(__pyx_v_self);
__Pyx_GIVEREF(__pyx_v_self);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = __pyx_v_self;
}
__pyx_L12:;
/* "numpy.pxd":249
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or
*/
__pyx_t_1 = (!__pyx_v_hasfields);
if (__pyx_t_1) {
/* "numpy.pxd":250
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)):
*/
__pyx_v_t = __pyx_v_descr->type_num;
/* "numpy.pxd":251
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_1 = (__pyx_v_descr->byteorder == '>');
if (__pyx_t_1) {
__pyx_t_2 = __pyx_v_little_endian;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (!__pyx_t_2) {
/* "numpy.pxd":252
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_1 = (__pyx_v_descr->byteorder == '<');
if (__pyx_t_1) {
__pyx_t_3 = (!__pyx_v_little_endian);
__pyx_t_7 = __pyx_t_3;
} else {
__pyx_t_7 = __pyx_t_1;
}
__pyx_t_1 = __pyx_t_7;
} else {
__pyx_t_1 = __pyx_t_2;
}
if (__pyx_t_1) {
/* "numpy.pxd":253
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L14;
}
__pyx_L14:;
/* "numpy.pxd":254
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
__pyx_t_1 = (__pyx_v_t == NPY_BYTE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__b;
goto __pyx_L15;
}
/* "numpy.pxd":255
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
__pyx_t_1 = (__pyx_v_t == NPY_UBYTE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__B;
goto __pyx_L15;
}
/* "numpy.pxd":256
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
__pyx_t_1 = (__pyx_v_t == NPY_SHORT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__h;
goto __pyx_L15;
}
/* "numpy.pxd":257
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
__pyx_t_1 = (__pyx_v_t == NPY_USHORT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__H;
goto __pyx_L15;
}
/* "numpy.pxd":258
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
__pyx_t_1 = (__pyx_v_t == NPY_INT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__i;
goto __pyx_L15;
}
/* "numpy.pxd":259
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
__pyx_t_1 = (__pyx_v_t == NPY_UINT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__I;
goto __pyx_L15;
}
/* "numpy.pxd":260
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__l;
goto __pyx_L15;
}
/* "numpy.pxd":261
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
__pyx_t_1 = (__pyx_v_t == NPY_ULONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__L;
goto __pyx_L15;
}
/* "numpy.pxd":262
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONGLONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__q;
goto __pyx_L15;
}
/* "numpy.pxd":263
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
__pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Q;
goto __pyx_L15;
}
/* "numpy.pxd":264
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
__pyx_t_1 = (__pyx_v_t == NPY_FLOAT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__f;
goto __pyx_L15;
}
/* "numpy.pxd":265
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
__pyx_t_1 = (__pyx_v_t == NPY_DOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__d;
goto __pyx_L15;
}
/* "numpy.pxd":266
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__g;
goto __pyx_L15;
}
/* "numpy.pxd":267
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
__pyx_t_1 = (__pyx_v_t == NPY_CFLOAT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zf;
goto __pyx_L15;
}
/* "numpy.pxd":268
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
__pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zd;
goto __pyx_L15;
}
/* "numpy.pxd":269
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
__pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zg;
goto __pyx_L15;
}
/* "numpy.pxd":270
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_1 = (__pyx_v_t == NPY_OBJECT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__O;
goto __pyx_L15;
}
/*else*/ {
/* "numpy.pxd":272
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_8));
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8));
__Pyx_GIVEREF(((PyObject *)__pyx_t_8));
__pyx_t_8 = 0;
__pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L15:;
/* "numpy.pxd":273
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "numpy.pxd":274
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
goto __pyx_L13;
}
/*else*/ {
/* "numpy.pxd":276
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = '^' # Native data types, manual alignment
* offset = 0
*/
__pyx_v_info->format = ((char *)malloc(255));
/* "numpy.pxd":277
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "numpy.pxd":278
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = '^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "numpy.pxd":281
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
* &offset) # <<<<<<<<<<<<<<
* f[0] = 0 # Terminate format string
*
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_9;
/* "numpy.pxd":282
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = 0 # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = 0;
}
__pyx_L13:;
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":284
* f[0] = 0 # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
/*
 * Buffer-protocol release hook for numpy.ndarray (Cython-generated from
 * numpy.pxd __releasebuffer__).  Frees the heap storage that the matching
 * __getbuffer__ placed into the Py_buffer:
 *   - info->format, but only when the dtype has fields (for fieldless
 *     dtypes __getbuffer__ points format at a static literal, not malloc'd
 *     memory);
 *   - info->strides, but only when sizeof(npy_intp) != sizeof(Py_ssize_t)
 *     (only then did __getbuffer__ allocate a converted copy; per the
 *     numpy.pxd comment, info->shape lives in the same block, so freeing
 *     strides releases both).
 * NOTE(review): this relies on __releasebuffer__ being called with the same
 * conditions that held in __getbuffer__ — true for the buffer protocol,
 * where the exporter is unchanged between the two calls.
 */
static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__");
/* "numpy.pxd":285
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 *         if PyArray_HASFIELDS(self):            # <<<<<<<<<<<<<<
 *             stdlib.free(info.format)
 *         if sizeof(npy_intp) != sizeof(Py_ssize_t):
 */
__pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self));
if (__pyx_t_1) {
/* "numpy.pxd":286
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 *         if PyArray_HASFIELDS(self):
 *             stdlib.free(info.format)            # <<<<<<<<<<<<<<
 *         if sizeof(npy_intp) != sizeof(Py_ssize_t):
 *             stdlib.free(info.strides)
 */
/* format was malloc'd by __getbuffer__ for structured dtypes only */
free(__pyx_v_info->format);
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":287
 *         if PyArray_HASFIELDS(self):
 *             stdlib.free(info.format)
 *         if sizeof(npy_intp) != sizeof(Py_ssize_t):            # <<<<<<<<<<<<<<
 *             stdlib.free(info.strides)
 *             # info.shape was stored after info.strides in the same block
 */
/* compile-time constant condition: the branch is dead code on platforms
 * where the two integer types have equal width */
__pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t)));
if (__pyx_t_1) {
/* "numpy.pxd":288
 *             stdlib.free(info.format)
 *         if sizeof(npy_intp) != sizeof(Py_ssize_t):
 *             stdlib.free(info.strides)            # <<<<<<<<<<<<<<
 *             # info.shape was stored after info.strides in the same block
 *
 */
free(__pyx_v_info->strides);
goto __pyx_L6;
}
__pyx_L6:;
__Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":764
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/*
 * Cython-generated inline wrapper for the numpy.pxd helper
 * PyArray_MultiIterNew1(a): returns PyArray_MultiIterNew(1, <void*>a),
 * i.e. a new numpy broadcast (multi-)iterator over one operand.
 * Returns a new reference; on failure returns 0 (NULL) with the Python
 * exception set and a traceback entry appended.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1");
/* "numpy.pxd":765
 *
 * cdef inline object PyArray_MultiIterNew1(a):
 *     return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew2(a, b):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* unreachable: Cython's implicit "return None" epilogue, dead because the
 * explicit return above always jumps to __pyx_L0 first */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* error path: drop the temporary, record the traceback, propagate NULL */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":767
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/*
 * Cython-generated inline wrapper for the numpy.pxd helper
 * PyArray_MultiIterNew2(a, b): returns PyArray_MultiIterNew(2, ...),
 * i.e. a new numpy broadcast (multi-)iterator over two operands.
 * Returns a new reference; on failure returns 0 (NULL) with the Python
 * exception set and a traceback entry appended.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2");
/* "numpy.pxd":768
 *
 * cdef inline object PyArray_MultiIterNew2(a, b):
 *     return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* unreachable: Cython's implicit "return None" epilogue, dead because the
 * explicit return above always jumps to __pyx_L0 first */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* error path: drop the temporary, record the traceback, propagate NULL */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":770
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/*
 * Cython-generated inline wrapper for the numpy.pxd helper
 * PyArray_MultiIterNew3(a, b, c): returns PyArray_MultiIterNew(3, ...),
 * i.e. a new numpy broadcast (multi-)iterator over three operands.
 * Returns a new reference; on failure returns 0 (NULL) with the Python
 * exception set and a traceback entry appended.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3");
/* "numpy.pxd":771
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c):
 *     return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* unreachable: Cython's implicit "return None" epilogue, dead because the
 * explicit return above always jumps to __pyx_L0 first */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* error path: drop the temporary, record the traceback, propagate NULL */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":773
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/*
 * Cython-generated inline wrapper for the numpy.pxd helper
 * PyArray_MultiIterNew4(a, b, c, d): returns PyArray_MultiIterNew(4, ...),
 * i.e. a new numpy broadcast (multi-)iterator over four operands.
 * Returns a new reference; on failure returns 0 (NULL) with the Python
 * exception set and a traceback entry appended.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4");
/* "numpy.pxd":774
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 *     return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* unreachable: Cython's implicit "return None" epilogue, dead because the
 * explicit return above always jumps to __pyx_L0 first */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* error path: drop the temporary, record the traceback, propagate NULL */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":776
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/*
 * Cython-generated inline wrapper for the numpy.pxd helper
 * PyArray_MultiIterNew5(a, b, c, d, e): returns PyArray_MultiIterNew(5, ...),
 * i.e. a new numpy broadcast (multi-)iterator over five operands.
 * Returns a new reference; on failure returns 0 (NULL) with the Python
 * exception set and a traceback entry appended.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5");
/* "numpy.pxd":777
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 *     return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
 *
 * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* unreachable: Cython's implicit "return None" epilogue, dead because the
 * explicit return above always jumps to __pyx_L0 first */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
/* error path: drop the temporary, record the traceback, propagate NULL */
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":779
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
long __pyx_t_10;
char *__pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_util_dtypestring");
/* "numpy.pxd":786
* cdef int delta_offset
* cdef tuple i
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "numpy.pxd":787
* cdef tuple i
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "numpy.pxd":790
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++;
__Pyx_XDECREF(__pyx_v_childname);
__pyx_v_childname = __pyx_t_3;
__pyx_t_3 = 0;
/* "numpy.pxd":791
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
__pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF(((PyObject *)__pyx_v_fields));
__pyx_v_fields = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "numpy.pxd":792
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - (new_offset - offset[0]) < 15:
*/
if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) {
PyObject* sequence = ((PyObject *)__pyx_v_fields);
if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) {
if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2);
else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence));
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
} else {
__Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2);
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF(((PyObject *)__pyx_v_child));
__pyx_v_child = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_v_new_offset);
__pyx_v_new_offset = __pyx_t_4;
__pyx_t_4 = 0;
/* "numpy.pxd":794
* child, new_offset = fields
*
* if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
/* "numpy.pxd":795
*
* if (end - f) - (new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == '>' and little_endian) or
*/
__pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_9), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":797
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_6 = (__pyx_v_child->byteorder == '>');
if (__pyx_t_6) {
__pyx_t_7 = __pyx_v_little_endian;
} else {
__pyx_t_7 = __pyx_t_6;
}
if (!__pyx_t_7) {
/* "numpy.pxd":798
*
* if ((child.byteorder == '>' and little_endian) or
* (child.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_6 = (__pyx_v_child->byteorder == '<');
if (__pyx_t_6) {
__pyx_t_8 = (!__pyx_v_little_endian);
__pyx_t_9 = __pyx_t_8;
} else {
__pyx_t_9 = __pyx_t_6;
}
__pyx_t_6 = __pyx_t_9;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "numpy.pxd":799
* if ((child.byteorder == '>' and little_endian) or
* (child.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_10), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
/* "numpy.pxd":809
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!__pyx_t_6) break;
/* "numpy.pxd":810
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 120;
/* "numpy.pxd":811
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "numpy.pxd":812
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_10 = 0;
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
}
/* "numpy.pxd":814
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_10 = 0;
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
/* "numpy.pxd":816
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child));
if (__pyx_t_6) {
/* "numpy.pxd":817
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_v_t);
__pyx_v_t = __pyx_t_3;
__pyx_t_3 = 0;
/* "numpy.pxd":818
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5);
if (__pyx_t_6) {
/* "numpy.pxd":819
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L10;
}
__pyx_L10:;
/* "numpy.pxd":822
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L11;
}
/* "numpy.pxd":823
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L11;
}
/* "numpy.pxd":824
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 104;
goto __pyx_L11;
}
/* "numpy.pxd":825
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L11;
}
/* "numpy.pxd":826
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 105;
goto __pyx_L11;
}
/* "numpy.pxd":827
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L11;
}
/* "numpy.pxd":828
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 108;
goto __pyx_L11;
}
/* "numpy.pxd":829
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L11;
}
/* "numpy.pxd":830
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 113;
goto __pyx_L11;
}
/* "numpy.pxd":831
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L11;
}
/* "numpy.pxd":832
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 102;
goto __pyx_L11;
}
/* "numpy.pxd":833
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 100;
goto __pyx_L11;
}
/* "numpy.pxd":834
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 103;
goto __pyx_L11;
}
/* "numpy.pxd":835
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 102;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":836
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 100;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":837
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 103;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":838
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L11;
}
/*else*/ {
/* "numpy.pxd":840
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
__pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_7), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_5));
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5));
__Pyx_GIVEREF(((PyObject *)__pyx_t_5));
__pyx_t_5 = 0;
__pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L11:;
/* "numpy.pxd":841
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L9;
}
/*else*/ {
/* "numpy.pxd":845
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
__pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_11;
}
__pyx_L9:;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "numpy.pxd":846
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":961
 * cdef inline void set_array_base(ndarray arr, object base)
 *
 * Record `base` as the object that keeps `arr`'s memory alive; passing
 * None clears the base pointer.  The reference to the new base is taken
 * BEFORE the old base is released, so the call stays safe even if the
 * new base aliases the object currently stored in arr.base.
 */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *new_owner;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base");
if (__pyx_v_base == Py_None) {
/* base is None: the array has no recorded owner */
new_owner = NULL;
} else {
/* take the new reference first -- important to do before the decref below */
Py_INCREF(__pyx_v_base);
new_owner = ((PyObject *)__pyx_v_base);
}
/* release whatever was previously recorded as the owner, then install */
Py_XDECREF(__pyx_v_arr->base);
__pyx_v_arr->base = new_owner;
__Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":971
 * cdef inline object get_array_base(ndarray arr)
 *
 * Return the object that owns `arr`'s memory, or None when the array has
 * no recorded base.  Always hands the caller a new reference.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("get_array_base");
if (__pyx_v_arr->base == NULL) {
/* no base recorded: report None */
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
} else {
/* hand back the stored base as a new reference */
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
}
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Module-level method table.  This module registers its Python-callable
   functions elsewhere (during module exec), so the table holds only the
   terminating sentinel entry required by the CPython API. */
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
/* Python 3 module definition for "chyantes_omp": no per-module state
   (m_size == -1, i.e. the module does not support sub-interpreters),
   no docstring, and only the shared (sentinel-only) method table. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
__Pyx_NAMESTR("chyantes_omp"),
0, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
/* Interned-string table consumed by __Pyx_InitStrings() in
   __Pyx_InitGlobals(): each row creates one cached Python string object
   and stores it through the first (pointer-to-pointer) field.  The
   trailing integer fields presumably encode length/encoding/intern
   options -- TODO confirm against the __Pyx_StringTabEntry declaration
   earlier in this file.  Terminated by an all-zero sentinel row. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_u_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 1, 0, 0},
{&__pyx_kp_u_11, __pyx_k_11, sizeof(__pyx_k_11), 0, 1, 0, 0},
{&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0},
{&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0},
{&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0},
{&__pyx_kp_u_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 1, 0, 0},
{&__pyx_n_s__DTYPE, __pyx_k__DTYPE, sizeof(__pyx_k__DTYPE), 0, 0, 1, 1},
{&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
{&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
{&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
{&__pyx_n_s__chyantes_omp, __pyx_k__chyantes_omp, sizeof(__pyx_k__chyantes_omp), 0, 0, 1, 1},
{&__pyx_n_s__double, __pyx_k__double, sizeof(__pyx_k__double), 0, 0, 1, 1},
{&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1},
{&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1},
{&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1},
{&__pyx_n_s__range_, __pyx_k__range_, sizeof(__pyx_k__range_), 0, 0, 1, 1},
{&__pyx_n_s__range_x, __pyx_k__range_x, sizeof(__pyx_k__range_x), 0, 0, 1, 1},
{&__pyx_n_s__range_y, __pyx_k__range_y, sizeof(__pyx_k__range_y), 0, 0, 1, 1},
{&__pyx_n_s__run, __pyx_k__run, sizeof(__pyx_k__run), 0, 0, 1, 1},
{&__pyx_n_s__step, __pyx_k__step, sizeof(__pyx_k__step), 0, 0, 1, 1},
{&__pyx_n_s__t, __pyx_k__t, sizeof(__pyx_k__t), 0, 0, 1, 1},
{&__pyx_n_s__xmax, __pyx_k__xmax, sizeof(__pyx_k__xmax), 0, 0, 1, 1},
{&__pyx_n_s__xmin, __pyx_k__xmin, sizeof(__pyx_k__xmin), 0, 0, 1, 1},
{&__pyx_n_s__xrange, __pyx_k__xrange, sizeof(__pyx_k__xrange), 0, 0, 1, 1},
{&__pyx_n_s__ymax, __pyx_k__ymax, sizeof(__pyx_k__ymax), 0, 0, 1, 1},
{&__pyx_n_s__ymin, __pyx_k__ymin, sizeof(__pyx_k__ymin), 0, 0, 1, 1},
{&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Look up the builtins this module uses (xrange/range, ValueError,
   range, RuntimeError) once at import time and cache them in file-scope
   globals.  Returns 0 on success, -1 (with a Python exception set by
   __Pyx_GetName) on failure. */
static int __Pyx_InitCachedBuiltins(void) {
#if PY_MAJOR_VERSION >= 3
/* Python 3 has no xrange builtin; cache builtin range under the xrange slot. */
__pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_builtin_xrange = __Pyx_GetName(__pyx_b, __pyx_n_s__xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Build the constant argument tuples used by the raise statements in the
   numpy buffer-protocol helpers (each tuple wraps one cached error-message
   string) so they are created once at import time rather than per raise.
   Every stanza follows the same pattern: allocate a 1-tuple, INCREF the
   cached string, store it with PyTuple_SET_ITEM (which steals the
   reference), and tell refnanny the references were given away.
   Returns 0 on success, -1 on failure with a Python exception set. */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants");
/* "numpy.pxd":211
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 */
__pyx_k_tuple_2 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_2));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_1));
PyTuple_SET_ITEM(__pyx_k_tuple_2, 0, ((PyObject *)__pyx_kp_u_1));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_1));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_2));
/* "numpy.pxd":215
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
 *
 * info.buf = PyArray_DATA(self)
 */
__pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_3));
PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4));
/* "numpy.pxd":253
 * if ((descr.byteorder == '>' and little_endian) or
 * (descr.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
 * if t == NPY_BYTE: f = "b"
 * elif t == NPY_UBYTE: f = "B"
 */
__pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_5));
PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6));
/* "numpy.pxd":795
 *
 * if (end - f) - (new_offset - offset[0]) < 15:
 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
 *
 * if ((child.byteorder == '>' and little_endian) or
 */
__pyx_k_tuple_9 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_9));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_8));
PyTuple_SET_ITEM(__pyx_k_tuple_9, 0, ((PyObject *)__pyx_kp_u_8));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_8));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_9));
/* "numpy.pxd":799
 * if ((child.byteorder == '>' and little_endian) or
 * (child.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
 * # One could encode it in the format string and have Cython
 * # complain instead, BUT: < and > in format strings also imply
 */
__pyx_k_tuple_10 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_10));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_5));
PyTuple_SET_ITEM(__pyx_k_tuple_10, 0, ((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_10));
/* "numpy.pxd":819
 * t = child.type_num
 * if end - f < 5:
 * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
 *
 * # Until ticket #99 is fixed, use integers to avoid warnings
 */
__pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_11));
PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_11));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_11));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12));
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* One-time global initialization at module import: seed the NaN constant,
   initialize threading, intern the string table, and cache the small int
   15.  Returns 0 on success, -1 on failure with a Python exception set. */
static int __Pyx_InitGlobals(void) {
/* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
a quiet NaN. */
memset(&__PYX_NAN, 0xFF, sizeof(__PYX_NAN));
/* Ensure the GIL machinery is initialized -- presumably needed because this
   module ("chyantes_omp") runs OpenMP threads; TODO confirm. */
PyEval_InitThreads();
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/* Cache int(15), used by the "format string too short" length check. */
__pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
return 0;
__pyx_L1_error:;
return -1;
}
/* Module initialization entry point for the Cython extension "chyantes_omp".
Creates the module object, initializes cached constants/builtins, imports the
numpy C-API types, runs the module-level Python statements (import numpy as np,
DTYPE = np.double, def run(...)), and installs the __test__ dict. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initchyantes_omp(void); /*proto*/
PyMODINIT_FUNC initchyantes_omp(void)
#else
PyMODINIT_FUNC PyInit_chyantes_omp(void); /*proto*/
PyMODINIT_FUNC PyInit_chyantes_omp(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
/* Optional refcount-debugging hook; fatal only if CYTHON_REFNANNY was requested
but neither "refnanny" nor "Cython.Runtime.refnanny" can be imported. */
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_chyantes_omp(void)");
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __pyx_binding_PyCFunctionType_USED
if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4(__Pyx_NAMESTR("chyantes_omp"), __pyx_methods, 0, 0, PYTHON_API_VERSION);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
#if PY_MAJOR_VERSION < 3
Py_INCREF(__pyx_m);
#endif
__pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_module_is_main_chyantes_omp) {
if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
/* Import the numpy extension types used by the buffer protocol code;
sizes are checked against the running numpy's struct layouts. */
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
/* "hyantes_para.pyx":4
 * #runas import numpy as np ; x = [ [i/10., i/10., i/20.] for i in xrange(400) ] ; x = np.array(x) ; run(0,0,90,90, 1, 0.0001, 400, 400, x)
 * cimport libc.math as math
 * import numpy as np             # <<<<<<<<<<<<<<
 * cimport numpy as np
 * cimport cython
 */
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "hyantes_para.pyx":9
 * from cython.parallel import parallel, prange
 *
 * DTYPE = np.double             # <<<<<<<<<<<<<<
 * ctypedef np.double_t DTYPE_t
 *
 */
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__double); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "hyantes_para.pyx":14
 * @cython.boundscheck(False)
 * @cython.cdivision(True)
 * def run(double xmin, double ymin, double xmax, double ymax, double step, double range_, int range_x, int range_y, np.ndarray[DTYPE_t, ndim=2, negative_indices=False] t):             # <<<<<<<<<<<<<<
 *     cdef np.ndarray[DTYPE_t, ndim=2, negative_indices=False] pt = np.zeros((range_x, range_y))
 *     cdef int i,j,k
 */
/* Bind the compiled 'run' function as a module attribute. */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_12chyantes_omp_run, NULL, __pyx_n_s__chyantes_omp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__run, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 14; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "hyantes_para.pyx":1
 * #pythran export run(double, double, double, double, double, double, int, int, double [] [])             # <<<<<<<<<<<<<<
 * #runas import numpy as np ; x = [ [i/10., i/10., i/20.] for i in xrange(400) ] ; x = np.array(x) ; run(0,0,90,90, 1, 0.0001, 400, 400, x)
 * cimport libc.math as math
 */
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_2));
if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
/* "numpy.pxd":971
 * arr.base = baseptr
 *
 * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
 *     if arr.base is NULL:
 *         return None
 */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
/* On failure: report a traceback if the module object exists, otherwise
ensure an ImportError is set for the importing caller. */
if (__pyx_m) {
__Pyx_AddTraceback("init chyantes_omp", __pyx_clineno, __pyx_lineno, __pyx_filename);
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init chyantes_omp");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* Runtime support code */
#if CYTHON_REFNANNY
/* Import 'modname' and return the void* stored in its "RefNannyAPI"
attribute, or NULL on any failure (import error, missing attribute). */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif /* CYTHON_REFNANNY */
/* Look up 'name' on 'dict' (a module object here), falling back to the
builtins module __pyx_b; sets NameError if not found anywhere.
Returns a new reference or NULL. */
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
PyObject *result;
result = PyObject_GetAttr(dict, name);
if (!result) {
if (dict != __pyx_b) {
/* not found in the module: clear the AttributeError, try builtins */
PyErr_Clear();
result = PyObject_GetAttr(__pyx_b, name);
}
if (!result) {
PyErr_SetObject(PyExc_NameError, name);
}
}
return result;
}
/*
 * Raise a TypeError describing a positional-argument count mismatch:
 * "<func>() takes <exactly|at least|at most> N positional argument(s)
 * (M given)".  num_min/num_max bound the accepted count, num_found is
 * what the caller actually passed, and 'exact' forces the "exactly"
 * wording.
 */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    const char *qualifier;
    Py_ssize_t expected;
    /* Too few arguments reports the minimum; otherwise the maximum. */
    if (num_found < num_min) {
        expected = num_min;
        qualifier = "at least";
    } else {
        expected = num_max;
        qualifier = "at most";
    }
    if (exact) {
        qualifier = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
        "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)",
        func_name, qualifier, expected,
        (expected == 1) ? "" : "s", num_found);
}
/* Raise TypeError for a keyword argument that was also passed positionally.
kw_name is a str (py3, %U) or bytes-str (py2, %s). */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AS_STRING(kw_name));
#endif
}
/* Distribute the entries of the 'kwds' dict into the 'values' slot array,
matching keys against the NULL-terminated 'argnames' table. Entries at
index >= num_pos_args are keyword-capable; matching a name below that
index means the argument was already given positionally. Unknown keywords
are stored into 'kwds2' when it is non-NULL, otherwise a TypeError is
raised. Returns 0 on success, -1 on error. */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
/* fast path: interned-string pointer comparison */
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
} else {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
#else
if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
#endif
goto invalid_keyword_type;
} else {
/* slow path: compare by length then by content */
for (name = first_kw_arg; *name; name++) {
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) break;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) break;
#endif
}
if (*name) {
values[name-argnames] = value;
} else {
/* unexpected keyword found */
/* check whether it duplicates a positional argument */
for (name=argnames; name != first_kw_arg; name++) {
if (**name == key) goto arg_passed_twice;
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) goto arg_passed_twice;
#endif
}
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
}
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, **name);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%s() got an unexpected keyword argument '%s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/*
 * Validate an argument's type.  Returns 1 if 'obj' is acceptable for
 * parameter 'name': None when none_allowed, exactly 'type' when exact,
 * or an instance of 'type' (subclasses included) otherwise.  Returns 0
 * and sets a TypeError/SystemError on failure.
 */
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
    const char *name, int exact)
{
    int acceptable;
    if (!type) {
        PyErr_Format(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) {
        acceptable = 1;
    } else if (exact) {
        acceptable = (Py_TYPE(obj) == type);
    } else {
        acceptable = PyObject_TypeCheck(obj, type);
    }
    if (acceptable)
        return 1;
    PyErr_Format(PyExc_TypeError,
        "Argument '%s' has incorrect type (expected %s, got %s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
    return 0;
}
/* Runtime byte-order probe: store 1 in an unsigned int and inspect the
   lowest-addressed byte; non-zero there means little-endian. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    const unsigned int probe = 1U;
    const unsigned char low_byte = *(const unsigned char *)&probe;
    return low_byte != 0;
}
/* Parser state for matching a PEP-3118 buffer format string against the
expected __Pyx_TypeInfo dtype. */
typedef struct {
__Pyx_StructField root; /* synthetic field wrapping the expected dtype */
__Pyx_BufFmt_StackElem* head; /* top of the struct-nesting stack */
size_t fmt_offset; /* byte offset reached in the format string */
size_t new_count, enc_count; /* pending repeat count / pooled count for enc_type */
int is_complex; /* current pooled type is a 'Z'-prefixed complex */
char enc_type; /* pooled format character (0 = none pending) */
char new_packmode; /* packing mode last seen ('@', '=', '^') */
char enc_packmode; /* packing mode in effect for enc_type */
} __Pyx_BufFmt_Context;
/* Initialize the format-string parser context: the caller-provided 'stack'
becomes the struct-nesting stack, rooted at the expected dtype 'type'.
Leading struct nesting ('S' typegroup) is unwrapped eagerly so that the
head points at the first scalar field to match. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
/* native alignment/packing is the struct-module default */
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
/* descend through leading struct wrappers to the first scalar field */
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
/*
 * Parse a non-negative decimal repeat count at *ts, advancing *ts past the
 * digits.  Returns -1 (without consuming anything) if the first character
 * is not a digit.
 *
 * BUG FIX: the continuation test was `*t >= '0' && *t < '9'`, which excludes
 * the digit '9' itself, so any '9' after the first digit terminated the
 * number early (e.g. "19" parsed as 1 and left ts on the '9').  The bound
 * must be inclusive: `*t <= '9'`.
 */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    }
    count = *t++ - '0';
    while (*t >= '0' && *t <= '9') {
        count *= 10;
        count += *t++ - '0';
    }
    *ts = t;
    return count;
}
/* Set a ValueError for a format character the parser cannot handle. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
/*
 * Map a struct-module format character to a human-readable type name,
 * used when composing "Buffer dtype mismatch" error messages.  The
 * floating-point characters take a complex variant when is_complex is set.
 */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    /* Floating-point characters depend on the is_complex flag. */
    if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
    switch (ch) {
        case 'b': return "'char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 0:   return "end";
        default:  return "unparseable format string";
    }
}
/* Size in bytes of a format character under standard (non-native) packing
modes ('=', '<', '>', '!'); complex types are twice the scalar size.
Returns 0 with an exception set for 'g' (no standard size) or an
unrecognized character. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* Size in bytes of a format character under native packing ('@'/'^'),
i.e. the compiler's sizeof; complex types are twice the scalar size.
Returns 0 with an exception set for an unrecognized character. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
/* Alignment-probe structs: sizeof(__Pyx_st_X) - sizeof(X) gives the padding
the compiler inserts after a leading char, i.e. the alignment of X. */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement (in bytes) of a format character, derived
from the __Pyx_st_* probe structs above. Returns 0 with an exception set
for an unrecognized character. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/*
 * Classify a format character into a type group used by the dtype
 * matcher: 'I' signed integer, 'U' unsigned integer, 'R' real float,
 * 'C' complex float, 'O' object, 'P' pointer.  Returns 0 with an
 * exception set for an unrecognized character.
 */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    if (ch == 'c' || ch == 'b' || ch == 'h' || ch == 'i' || ch == 'l' || ch == 'q')
        return 'I';
    if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
        return 'U';
    if (ch == 'f' || ch == 'd' || ch == 'g')
        return is_complex ? 'C' : 'R';
    if (ch == 'O')
        return 'O';
    if (ch == 'P')
        return 'P';
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Set a ValueError describing a dtype mismatch between the expected field
(from the parser stack) and the format character actually encountered.
Distinguishes top-level mismatches from mismatches inside a nested struct
field, and a fully-consumed dtype (head == NULL) from a pending one. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
/* mismatch inside a nested struct: name the parent and the field */
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
/* Consume the pooled run of 'enc_count' repetitions of format character
'enc_type', matching each repetition against the next expected field on
the parser stack (size, type group, and byte offset must all agree).
Advances fmt_offset, applying native alignment padding in '@' mode.
Returns 0 on success, -1 with an exception set on mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset;
if (ctx->enc_type == 0) return 0;
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
/* size depends on packing mode: native sizeof vs standard size */
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
/* '@' mode: round fmt_offset up to the type's native alignment */
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
/* special case -- treat as struct rather than complex number */
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
--ctx->enc_count; /* Consume from buffer string */
/* Done checking, move to next field, pushing or popping struct stack if needed */
while (1) {
if (field == &ctx->root) {
/* whole dtype consumed; any leftover repetitions are an error */
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break; /* breaks both loops as ctx->enc_count == 0 */
}
ctx->head->field = ++field;
if (field->type == NULL) {
/* end of this struct's field list: pop back to the parent */
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
/* nested struct: push its first field */
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue; /* empty struct */
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
/* Walk the PEP-3118 format string 'ts', validating it against the expected
dtype in 'ctx'. Repeat counts and identical adjacent characters are pooled
into chunks that __Pyx_BufFmt_ProcessTypeChunk consumes. Returns the
position after the parsed portion (or after a closing '}' for substructs),
or NULL with an exception set on mismatch. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
/* end of string: flush the pending chunk; dtype must be consumed */
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case 10:
case 13:
/* skip whitespace (space, LF, CR) */
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T': /* substruct */
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
++ts;
ts_after_sub = ts;
/* re-parse the substruct body once per repetition */
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
}
break;
case '}': /* end of substruct; either repeat or move on */
++ts;
return ts;
case 'x':
/* pad bytes: advance the offset without matching a field */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
/* complex prefix: must be followed by f, d, or g */
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
} /* fall through */
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
/* Continue pooling same type */
ctx->enc_count += ctx->new_count;
} else {
/* New type */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
}
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
/* field name annotation ":name:" -- skipped, not validated */
++ts;
while(*ts != ':') ++ts;
++ts;
break;
default:
{
int number = __Pyx_BufFmt_ParseNumber(&ts);
if (number == -1) { /* First char was not a digit */
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", *ts);
return NULL;
}
ctx->new_count = (size_t)number;
}
}
}
}
/* Reset a Py_buffer to the "unacquired" sentinel state: NULL buf/obj and
the shared all-zero / all-minus-one stride and shape arrays. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer from 'obj' and validate it against the expected dtype:
dimensionality must equal 'nd', the format string must match 'dtype'
(unless 'cast' suppresses the check), and the item size must agree.
None/NULL yields a zeroed buffer and success. Returns 0 on success,
-1 with an exception set (and a zeroed buffer) on failure. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) {
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
/* full format-string check against the expected dtype */
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
/* normalize missing suboffsets to the shared minus-one sentinel */
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
/* Release a buffer acquired via __Pyx_GetBufferAndValidate; a NULL buf means
it was never acquired. Undoes the minus-one suboffsets normalization
before releasing. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
/* Check that 'obj' is an instance of 'type' (subclasses allowed).
Returns 1 on success, 0 with a TypeError/SystemError set otherwise. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(PyObject_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* Install (type, value, tb) as the thread's current exception, stealing the
references, and drop whatever exception state was there before. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
/* release the previously-set exception after the swap */
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
/* Move the thread's current exception state into *type/*value/*tb (ownership
transfers to the caller) and clear it from the thread state. */
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
PyThreadState *tstate = PyThreadState_GET();
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of the 'raise' statement: normalizes
(type, value, tb) and installs it via __Pyx_ErrRestore. Accepts either an
exception class (with optional value) or an exception instance (value must
then be None). */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
/* cause is unused */
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
/* First, check the traceback argument, replacing None with NULL. */
if (tb == Py_None) {
Py_DECREF(tb);
tb = 0;
}
else if (tb != NULL && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
/* Next, replace a missing value with None */
if (value == NULL) {
value = Py_None;
Py_INCREF(value);
}
#if PY_VERSION_HEX < 0x02050000
if (!PyClass_Check(type))
#else
if (!PyType_Check(type))
#endif
{
/* Raising an instance. The value should be a dummy. */
if (value != Py_None) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
/* Normalize to raise <class>, <instance> */
Py_DECREF(value);
value = type;
#if PY_VERSION_HEX < 0x02050000
if (PyInstance_Check(type)) {
type = (PyObject*) ((PyInstanceObject*)type)->in_class;
Py_INCREF(type);
}
else {
type = 0;
PyErr_SetString(PyExc_TypeError,
"raise: exception must be an old-style class or instance");
goto raise_error;
}
#else
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
#endif
}
/* __Pyx_ErrRestore steals the references we hold */
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#endif
#else /* Python 3+ */
/*
 * Python 3 implementation of the 'raise' statement, including
 * 'raise X from cause'.  Normalizes (type, value, tb), attaches the
 * cause via PyException_SetCause, and installs the exception with
 * PyErr_SetObject.  All error paths fall through to 'bad' with an
 * exception set.
 *
 * BUG FIX: when a cause is given and no value exists yet, the original
 * passed the result of PyObject_CallObject(type, NULL) straight into
 * PyException_SetCause without a NULL check -- if instantiating the
 * exception failed, that dereferenced NULL.  Now the failure path
 * releases fixed_cause and bails out with the instantiation error set.
 */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* raising an instance: no separate value allowed */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (!PyExceptionClass_Check(type)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        PyObject *fixed_cause;
        if (PyExceptionClass_Check(cause)) {
            /* a class cause is instantiated with no arguments */
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        }
        else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                "exception causes must derive from "
                "BaseException");
            goto bad;
        }
        if (!value) {
            value = PyObject_CallObject(type, NULL);
            if (value == NULL) {
                /* instantiation failed: don't hand NULL to
                   PyException_SetCause; drop the cause reference */
                Py_DECREF(fixed_cause);
                goto bad;
            }
        }
        /* PyException_SetCause steals the reference to fixed_cause */
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* attach the explicit traceback to the freshly-set exception */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
    }
bad:
    return;
}
#endif
/* Raise ValueError: the unpack target needed more than 'index' values. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %"PY_FORMAT_SIZE_T"d value%s to unpack",
index, (index == 1) ? "" : "s");
}
/* Raise ValueError: the iterable produced more than 'expected' values. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected);
}
/* Raise TypeError for attempting to iterate None. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Pick the appropriate unpack error for tuple 't' at unpack position
'index': None is not iterable, too short, or too long. */
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
__Pyx_RaiseNoneNotIterableError();
} else if (PyTuple_GET_SIZE(t) < index) {
__Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
} else {
__Pyx_RaiseTooManyValuesError(index);
}
}
#if PY_MAJOR_VERSION < 3
/* Python 2 buffer acquisition: use the native buffer protocol when the
object supports it (2.6+), otherwise fall back to the generated ndarray
__getbuffer__ implementation; anything else is a TypeError. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
#if PY_VERSION_HEX >= 0x02060000
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
#endif
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags);
else {
PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
}
/* Python 2 buffer release: mirror of __Pyx_GetBuffer; drops the owning
reference and clears view->obj. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject* obj = view->obj;
if (obj) {
#if PY_VERSION_HEX >= 0x02060000
if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;}
#endif
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view);
Py_DECREF(obj);
view->obj = NULL;
}
}
#endif
/* Import module 'name' by calling the builtin __import__ with the module's
globals, an empty dict for locals, 'from_list' (or an empty list), and
'level'. Returns a new reference to the module or NULL with an exception
set. Note: success also flows through the 'bad' label, which only releases
temporaries. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
PyObject *py_import = 0;
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
if (!py_import)
goto bad;
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
#if PY_VERSION_HEX >= 0x02050000
{
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
}
#else
/* Python < 2.5: __import__ has no level argument */
if (level>0) {
PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
goto bad;
}
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, NULL);
#endif
bad:
Py_XDECREF(empty_list);
Py_XDECREF(py_import);
Py_XDECREF(empty_dict);
return module;
}
/* Convert a Py_intptr_t to a Python int/long, choosing the conversion by
the platform width of Py_intptr_t (the sizeof comparisons fold away at
compile time). is_unsigned detects whether the typedef is unsigned. */
static CYTHON_INLINE PyObject *__Pyx_PyInt_to_py_Py_intptr_t(Py_intptr_t val) {
const Py_intptr_t neg_one = (Py_intptr_t)-1, const_zero = (Py_intptr_t)0;
const int is_unsigned = const_zero < neg_one;
if ((sizeof(Py_intptr_t) == sizeof(char)) ||
(sizeof(Py_intptr_t) == sizeof(short))) {
return PyInt_FromLong((long)val);
} else if ((sizeof(Py_intptr_t) == sizeof(int)) ||
(sizeof(Py_intptr_t) == sizeof(long))) {
if (is_unsigned)
return PyLong_FromUnsignedLong((unsigned long)val);
else
return PyInt_FromLong((long)val);
} else if (sizeof(Py_intptr_t) == sizeof(PY_LONG_LONG)) {
if (is_unsigned)
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)val);
else
return PyLong_FromLongLong((PY_LONG_LONG)val);
} else {
/* unusual width: build the Python int from raw bytes */
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
return _PyLong_FromByteArray(bytes, sizeof(Py_intptr_t),
little, !is_unsigned);
}
}
/* Construct a float complex from real/imaginary parts. Three variants:
C++ std::complex, C99 _Complex, or the fallback struct representation. */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Struct-based float complex arithmetic, used only when the compiler has no
native complex support (CYTHON_CCOMPLEX is 0). */
#if CYTHON_CCOMPLEX
#else
/* equality: both components must match */
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
/* a + b */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
/* a - b */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
/* a * b */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
/* a / b via the textbook formula (no scaling against overflow) */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
/* -a */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
/* a == 0 */
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
/* complex conjugate */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
/* |a|: prefer hypotf (robust against intermediate overflow) when available;
MSVC and platforms without HAVE_HYPOT use the direct sqrtf form. */
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
/* Complex power a**b for the struct-based float complex fallback.
 * Small integer exponents (|n| <= 4) are handled by repeated multiplication;
 * everything else goes through the polar form z = exp(b * log(a)). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    float r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**(-n) == (1/a)**n: invert a in place, negate the exponent. */
            float denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* Fixed: the original assigned prodf(a,a) to z (dead store)
                 * and then computed the same product a second time. */
                return __Pyx_c_prodf(a, a);
            case 3:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, a);
            case 4:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a; /* 0**b == 0 (integer b == 0 already handled above) */
        }
        /* NOTE(review): for a.real < 0 this feeds logf a negative value
         * (NaN result); kept as-is to preserve the generated behavior. */
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_absf(a);
        theta = atan2f(a.imag, a.real);
    }
    lnr = logf(r);
    z_r = expf(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cosf(z_theta);
    z.imag = z_r * sinf(z_theta);
    return z;
}
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
/* Build a double complex value from its parts using std::complex (C++). */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
/* Same, using the compiler's native C99 _Complex type. */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
/* Fallback when no native complex support exists: fill the two-field struct. */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
/* Struct-based double complex arithmetic (no native complex support).
 * Double-precision twins of the __Pyx_c_*f helpers above. */
/* Exact component-wise equality. */
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
/* Complex addition. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
/* Complex subtraction. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
/* Complex multiplication. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
/* Complex division via the conjugate; no zero-denominator guard. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
/* Unary negation. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
/* True iff both components are exactly zero. */
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
/* Complex conjugate. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
/* Magnitude |z|; hypot is overflow-safe, sqrt fallback for MSVC / no-hypot. */
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
/* Complex power a**b for the struct-based double complex fallback.
 * Small integer exponents (|n| <= 4) use repeated multiplication;
 * everything else goes through the polar form z = exp(b * log(a)). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    double r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**(-n) == (1/a)**n: invert a in place, negate the exponent. */
            double denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* Fixed: the original assigned prod(a,a) to z (dead store)
                 * and then computed the same product a second time. */
                return __Pyx_c_prod(a, a);
            case 3:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, a);
            case 4:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a; /* 0**b == 0 (integer b == 0 already handled above) */
        }
        /* NOTE(review): for a.real < 0 this feeds log a negative value
         * (NaN result); kept as-is to preserve the generated behavior. */
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_abs(a);
        theta = atan2(a.imag, a.real);
    }
    lnr = log(r);
    z_r = exp(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cos(z_theta);
    z.imag = z_r * sin(z_theta);
    return z;
}
#endif
#endif
/* The __Pyx_PyInt_As* family converts a Python integer object to a C
 * integer type. Pattern for types narrower than long: convert through
 * __Pyx_PyInt_AsLong, verify the value round-trips through the target type,
 * and raise OverflowError otherwise. On failure they return (type)-1 with a
 * Python exception set; callers must also check PyErr_Occurred() since -1
 * can be a legitimate result. */
/* Python int -> unsigned char. */
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(unsigned char) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(unsigned char)val)) {
/* Preserve a pending conversion error; otherwise report overflow. */
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to unsigned char" :
"value too large to convert to unsigned char");
}
return (unsigned char)-1;
}
return (unsigned char)val;
}
return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Python int -> unsigned short. */
static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(unsigned short) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(unsigned short)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to unsigned short" :
"value too large to convert to unsigned short");
}
return (unsigned short)-1;
}
return (unsigned short)val;
}
return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Python int -> unsigned int. */
static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(unsigned int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(unsigned int)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to unsigned int" :
"value too large to convert to unsigned int");
}
return (unsigned int)-1;
}
return (unsigned int)val;
}
return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Python int -> char (signedness of plain char is implementation-defined;
 * is_unsigned adapts the error message accordingly). */
static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
const char neg_one = (char)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(char) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(char)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to char" :
"value too large to convert to char");
}
return (char)-1;
}
return (char)val;
}
return (char)__Pyx_PyInt_AsLong(x);
}
/* Python int -> short. */
static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
const short neg_one = (short)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(short) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(short)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to short" :
"value too large to convert to short");
}
return (short)-1;
}
return (short)val;
}
return (short)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python integer object to a C int with overflow checking.
 * On failure returns (int)-1 with a Python exception set; callers must
 * additionally consult PyErr_Occurred() since -1 is also a valid result. */
static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
    const int neg_one = (int)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(int) >= sizeof(long)) {
        /* int can represent every long value here; delegate directly. */
        return (int)__Pyx_PyInt_AsLong(x);
    }
    {
        long wide = __Pyx_PyInt_AsLong(x);
        if (likely(wide == (long)(int)wide))
            return (int)wide;
        /* Out of range: keep a pending conversion error if one is already
         * set, otherwise raise OverflowError with a suitable message. */
        if (!unlikely(wide == -1 && PyErr_Occurred())) {
            PyErr_SetString(PyExc_OverflowError,
                (is_unsigned && unlikely(wide < 0)) ?
                    "can't convert negative value to int" :
                    "value too large to convert to int");
        }
        return (int)-1;
    }
}
/* Python int -> signed char. */
static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
const signed char neg_one = (signed char)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(signed char) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(signed char)val)) {
/* Preserve a pending conversion error; otherwise report overflow. */
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to signed char" :
"value too large to convert to signed char");
}
return (signed char)-1;
}
return (signed char)val;
}
return (signed char)__Pyx_PyInt_AsSignedLong(x);
}
/* Python int -> signed short. */
static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
const signed short neg_one = (signed short)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(signed short) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(signed short)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to signed short" :
"value too large to convert to signed short");
}
return (signed short)-1;
}
return (signed short)val;
}
return (signed short)__Pyx_PyInt_AsSignedLong(x);
}
/* Python int -> signed int. */
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
const signed int neg_one = (signed int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(signed int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(signed int)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to signed int" :
"value too large to convert to signed int");
}
return (signed int)-1;
}
return (signed int)val;
}
return (signed int)__Pyx_PyInt_AsSignedLong(x);
}
/* NOTE(review): despite the name, this generated function converts to a
 * C int, not long double — the return type and body are the int conversion.
 * Presumably an artifact of the Cython template; confirm against the .pyx
 * before relying on the name. */
static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
const int neg_one = (int)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (sizeof(int) < sizeof(long)) {
long val = __Pyx_PyInt_AsLong(x);
if (unlikely(val != (long)(int)val)) {
if (!unlikely(val == -1 && PyErr_Occurred())) {
PyErr_SetString(PyExc_OverflowError,
(is_unsigned && unlikely(val < 0)) ?
"can't convert negative value to int" :
"value too large to convert to int");
}
return (int)-1;
}
return (int)val;
}
return (int)__Pyx_PyInt_AsLong(x);
}
/* Wide converters (long and larger). Pattern: fast path for Python 2 ints,
 * then exact PyLong handling (with a Py_SIZE sign check for unsigned
 * targets), then a generic fallback that coerces via __Pyx_PyNumber_Int and
 * recurses once on the result. All return (type)-1 with an exception set on
 * failure. */
/* Python int -> unsigned long. */
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned long");
return (unsigned long)-1;
}
return (unsigned long)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
/* Negative PyLongs have Py_SIZE < 0; reject before conversion. */
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned long");
return (unsigned long)-1;
}
return (unsigned long)PyLong_AsUnsignedLong(x);
} else {
return (unsigned long)PyLong_AsLong(x);
}
} else {
/* Not an int/long: coerce with __int__/__long__ and retry once. */
unsigned long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (unsigned long)-1;
val = __Pyx_PyInt_AsUnsignedLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Python int -> unsigned long long. */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned PY_LONG_LONG");
return (unsigned PY_LONG_LONG)-1;
}
return (unsigned PY_LONG_LONG)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to unsigned PY_LONG_LONG");
return (unsigned PY_LONG_LONG)-1;
}
return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
unsigned PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (unsigned PY_LONG_LONG)-1;
val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Python int -> long (is_unsigned is always 0 here; the unsigned branches
 * are dead but kept by the code generator's shared template). */
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
const long neg_one = (long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long)-1;
}
return (long)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long)-1;
}
return (long)PyLong_AsUnsignedLong(x);
} else {
return (long)PyLong_AsLong(x);
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (long)-1;
val = __Pyx_PyInt_AsLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Python int -> long long. */
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to PY_LONG_LONG");
return (PY_LONG_LONG)-1;
}
return (PY_LONG_LONG)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to PY_LONG_LONG");
return (PY_LONG_LONG)-1;
}
return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
return (PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (PY_LONG_LONG)-1;
val = __Pyx_PyInt_AsLongLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Python int -> signed long. */
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
const signed long neg_one = (signed long)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed long");
return (signed long)-1;
}
return (signed long)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed long");
return (signed long)-1;
}
return (signed long)PyLong_AsUnsignedLong(x);
} else {
return (signed long)PyLong_AsLong(x);
}
} else {
signed long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (signed long)-1;
val = __Pyx_PyInt_AsSignedLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Python int -> signed long long. */
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
if (likely(PyInt_Check(x))) {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed PY_LONG_LONG");
return (signed PY_LONG_LONG)-1;
}
return (signed PY_LONG_LONG)val;
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to signed PY_LONG_LONG");
return (signed PY_LONG_LONG)-1;
}
return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
} else {
return (signed PY_LONG_LONG)PyLong_AsLongLong(x);
}
} else {
signed PY_LONG_LONG val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (signed PY_LONG_LONG)-1;
val = __Pyx_PyInt_AsSignedLongLong(tmp);
Py_DECREF(tmp);
return val;
}
}
/* Warn when the Python major/minor version this module was compiled against
 * differs from the interpreter running it. Returns 0 on match, otherwise the
 * result of the PyErr_Warn* call (-1 if the warning became an exception). */
static int __Pyx_check_binary_version(void) {
    /* 5 bytes so two-digit minor versions ("3.10") are not silently
     * truncated to "3.1" as with the original [4] buffers; the truncation
     * garbled the warning text. NOTE(review): the single-character
     * comparison below still cannot distinguish e.g. 3.1 from 3.10 —
     * a pre-existing limitation left unchanged. */
    char ctversion[5], rtversion[5];
    PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 5, "%s", Py_GetVersion());
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
#if PY_VERSION_HEX < 0x02050000
        return PyErr_Warn(NULL, message);
#else
        return PyErr_WarnEx(NULL, message, 1);
#endif
    }
    return 0;
}
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import class_name from module_name and verify it is a type object whose
 * tp_basicsize matches the size this module was compiled against.
 * strict == 0 tolerates a grown struct (warns); any other mismatch is an
 * error. Returns a new reference to the type, or NULL with an exception. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
#if PY_MAJOR_VERSION < 3
py_name = PyString_FromString(class_name);
#else
py_name = PyUnicode_FromString(class_name);
#endif
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
/* Both temporaries released as soon as the attribute is fetched. */
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%s.%s is not a type object",
module_name, class_name);
goto bad;
}
/* Larger runtime struct in non-strict mode: binary layout may still be
 * compatible for the prefix, so only warn. */
if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
#if PY_VERSION_HEX < 0x02050000
if (PyErr_Warn(NULL, warning) < 0) goto bad;
#else
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
#endif
}
else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) {
PyErr_Format(PyExc_ValueError,
"%s.%s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import a module by name via PyImport_Import.
 * Returns a new reference, or NULL with an exception set. */
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
#if PY_MAJOR_VERSION < 3
py_name = PyString_FromString(name);
#else
py_name = PyUnicode_FromString(name);
#endif
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Append a synthetic frame to the current Python traceback so errors raised
 * from this C module point at the original source file/line. Builds a
 * minimal code object and frame, then hands the frame to PyTraceBack_Here.
 * Failures are silently ignored (the original exception is more important
 * than the traceback decoration). */
static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
int __pyx_lineno, const char *__pyx_filename) {
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
PyObject *py_globals = 0;
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(__pyx_filename);
#else
py_srcfile = PyUnicode_FromString(__pyx_filename);
#endif
if (!py_srcfile) goto bad;
/* When the C line is known, embed "func (file.c:123)" in the frame name. */
if (__pyx_clineno) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
/* Borrowed reference; deliberately not decref'd below. */
py_globals = PyModule_GetDict(__pyx_m);
if (!py_globals) goto bad;
py_code = PyCode_New(
0, /*int argcount,*/
#if PY_MAJOR_VERSION >= 3
0, /*int kwonlyargcount,*/
#endif
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
__pyx_lineno, /*int firstlineno,*/
__pyx_empty_bytes /*PyObject *lnotab*/
);
if (!py_code) goto bad;
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
py_globals, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
/* Point the frame at the Python source line before recording it. */
py_frame->f_lineno = __pyx_lineno;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
/* Intern/construct the module's string constants from the compile-time
 * string table. Each entry says whether the literal is unicode/str/bytes,
 * whether to intern it, and its encoding; t->n includes the terminating
 * NUL, hence the "t->n - 1" lengths. Returns 0 on success, -1 on failure
 * (with a Python exception set). */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else /* Python 3+ has unicode identifiers */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
/* Type Conversion Functions */
/* Truth test with fast paths for the singletons True, False and None;
 * anything else falls through to the generic PyObject_IsTrue protocol
 * (which may return -1 with an exception set). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True)
        return 1;
    if ((x == Py_False) || (x == Py_None))
        return 0;
    return PyObject_IsTrue(x);
}
/* Coerce an arbitrary object to a Python integer via its __int__/__long__
 * slots. Returns a new reference, or NULL with TypeError if the object is
 * not integer-convertible or its slot returned a non-integer. Exact
 * ints/longs are returned unchanged (with an extra reference). */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
#if PY_VERSION_HEX < 0x03000000
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return Py_INCREF(x), x;
m = Py_TYPE(x)->tp_as_number;
#if PY_VERSION_HEX < 0x03000000
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
if (res) {
/* Guard against a misbehaving __int__/__long__ returning a non-int. */
#if PY_VERSION_HEX < 0x03000000
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%s__ returned non-%s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
/* Convert an object to Py_ssize_t through the __index__ protocol.
 * Returns -1 with an exception on failure (callers must check
 * PyErr_Occurred() since -1 is also a valid value). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject* x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
/* Build a Python int from a C size_t. Pre-2.5 Pythons lack
 * PyInt_FromSize_t, so large values go through _PyLong_FromByteArray
 * using the host's native byte order. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
#if PY_VERSION_HEX < 0x02050000
if (ival <= LONG_MAX)
return PyInt_FromLong((long)ival);
else {
unsigned char *bytes = (unsigned char *) &ival;
/* Runtime endianness probe: 1 iff the host is little-endian. */
int one = 1; int little = (int)*(unsigned char*)&one;
return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
}
#else
return PyInt_FromSize_t(ival);
#endif
}
/* Convert a Python int to size_t via unsigned long long, with an explicit
 * range check in case size_t is narrower. Returns (size_t)-1 on failure
 * with an exception set. */
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
return (size_t)-1;
} else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to size_t");
return (size_t)-1;
}
return (size_t)val;
}
#endif /* Py_PYTHON_H */
|
utilityNestedDisectionMetis.h | // ***********************************************************************
//
// Grappolo: A C++ library for graph clustering
// Mahantesh Halappanavar (hala@pnnl.gov)
// Pacific Northwest National Laboratory
//
// ***********************************************************************
//
// Copyright (2014) Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#ifndef _graph_NestDisect_
#define _graph_NestDisect_
/*
int METIS_NodeND(idx_t *nvtxs, idx_t *xadj, idx_t *adjncy, idx_t *vwgt, idx_t *options,
idx_t *perm, idx_t *iperm)
Description
This function computes fill reducing orderings of sparse matrices using the multilevel nested dissection algorithm.
Parameters
nvtxs: The number of vertices in the graph.
xadj, adjncy: The adjacency structure of the graph as described in Section 5.5.
vwgt (NULL): An array of size nvtxs specifying the weights of the vertices. If the graph is weighted, the nested dissection ordering computes vertex separators that minimize the sum of the weights of the vertices
on the separators. A NULL can be passed to indicate a graph with equal weight vertices (or unweighted).
options (NULL)
This is the array of options as described in Section 5.4. The following options are valid:
METIS_OPTION_CTYPE, METIS_OPTION_RTYPE, METIS_OPTION_NO2HOP,
METIS_OPTION_NSEPS, METIS_OPTION_NITER, METIS_OPTION_UFACTOR,
METIS_OPTION_COMPRESS, METIS_OPTION_CCORDER, METIS_OPTION_SEED,
METIS_OPTION_PFACTOR, METIS_OPTION_NUMBERING, METIS_OPTION_DBGLVL
perm, iperm: These are vectors, each of size nvtxs. Upon successful completion, they store the fill-reducing permutation and inverse-permutation. Let A be the original matrix and A0 be the permuted matrix. The
arrays perm and iperm are defined as follows.
Row (column) i of A0 is the perm[i] row (column) of A, and row (column) i of A is the iperm[i] row (column) of A0. The numbering of this vector starts from either 0 or 1, depending on the value of options[METIS OPTION NUMBERING].
Returns:
METIS_OK Indicates that the function returned normally.
METIS_ERROR_INPUT Indicates an input error.
METIS_ERROR_MEMORY Indicates that it could not allocate the required memory.
METIS_ERROR Indicates some other type of error.
*/
extern "C" {
#include "metis.h"
}
using namespace std;
/*
#ifdef __cplusplus
extern "C" {
#endif
//Nested dissection
int METIS_NodeND(idx t *nvtxs, idx t *xadj, idx t *adjncy, idx t *vwgt, idx t *options,
idx t *perm, idx t *iperm);
#ifdef __cplusplus
}
#endif
*/
//METIS Graph Partitioner:
// Compute a fill-reducing vertex ordering of graph G using METIS multilevel
// nested dissection, storing the resulting permutation in old2NewMap.
// old2NewMap must have room for NV = G->numVertices entries.
// The graph is assumed symmetric with both directions stored (2*NE entries).
void MetisNDReorder( graph *G, comm_type *old2NewMap ) {
    printf("Within MetisNDReorder(): \n");
    //Get the iterators for the graph:
    comm_type NV = G->numVertices;
    comm_type NE = G->numEdges;
    comm_type *vtxPtr = G->edgeListPtrs;
    edge *vtxInd = G->edgeList;
    // NOTE(review): %ld assumes comm_type is long — confirm the typedef.
    printf("|V|= %ld, |E|= %ld \n", NV, NE);
    int status = 0;
    idx_t nvtxs = (idx_t) NV;
    // CSR row-pointer array: NV+1 entries.
    idx_t *xadj = (idx_t *) malloc ((NV+1) * sizeof(idx_t));
    assert(xadj != 0);
#pragma omp parallel for
    for(comm_type i=0; i<=NV; i++) {
        xadj[i] = (idx_t) vtxPtr[i];
    }
    idx_t *adjncy = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjncy != 0);
#pragma omp parallel for
    for(comm_type i=0; i<2*NE; i++) {
        adjncy[i] = (idx_t) vtxInd[i].tail;
    }
    // NOTE(review): adjwgt is built but never passed to METIS_NodeND below
    // (the ordering runs on the unweighted structure); kept for parity with
    // the original code.
    idx_t *adjwgt = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjwgt != 0);
#pragma omp parallel for
    for(comm_type i=0; i<2*NE; i++) {
        adjwgt[i] = (idx_t) vtxInd[i].weight;
    }
    // perm/iperm: fill-reducing permutation and its inverse (NV entries each).
    idx_t *perm = (idx_t *) malloc (NV * sizeof(idx_t)); assert(perm != 0);
    idx_t *iperm = (idx_t *) malloc (NV * sizeof(idx_t)); assert(iperm != 0);
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);
    options[METIS_OPTION_CTYPE] = METIS_CTYPE_SHEM; //Sorted heavy-edge matching
    options[METIS_OPTION_IPTYPE] = METIS_IPTYPE_NODE; //Grows a bisection using a greedy strategy.
    options[METIS_OPTION_RTYPE] = METIS_RTYPE_SEP1SIDED; //FM-based cut refinement.
    options[METIS_OPTION_DBGLVL] = 1; //#different separators at each level of nested dissection.
    options[METIS_OPTION_UFACTOR] = 200; //Maximum allowed load imbalance among partitions
    options[METIS_OPTION_NO2HOP] = 0; //The 2-hop matching (0=perform; 1=Do not)
    options[METIS_OPTION_COMPRESS] = 1; //Combine vertices with identical adjacency lists (0=do not)
    options[METIS_OPTION_CCORDER] = 0; //Connected components identified and ordered separately (1=Yes)
    options[METIS_OPTION_SEED] = 786; //Specifies the seed for the random number generator.
    options[METIS_OPTION_NITER] = 10; //#iterations for the refinement algorithms
    options[METIS_OPTION_NSEPS] = 1; //#different separators
    options[METIS_OPTION_PFACTOR] = 10; //Min degree of the vertices that will be ordered last
    options[METIS_OPTION_NUMBERING]= 0; //C-style numbering, starting from 0
    /* int returnVal = METIS_PartGraphKway(&nvtxs, &ncon, xadj, adjncy, NULL, NULL, adjwgt,
     &nparts, NULL, NULL, options, &objval, part); */
    status = METIS_NodeND(&nvtxs, xadj, adjncy, NULL, options, perm, iperm);
    if(status == METIS_OK)
        printf("Nested dissection returned correctly. Will store the permutations in vectors perm and iperm.\n");
    else {
        if(status == METIS_ERROR_MEMORY)
            printf("Metis could not allocate memory.\n");
        else if(status == METIS_ERROR_INPUT)
            printf("Metis had issues with input.\n");
        else
            printf("Some other Metis error: %d\n", status); // %d: status is an int (was %ld)
    }
    // BUG FIX: perm has exactly NV entries. The original loop ran with
    // i <= NV, reading perm[NV] and writing old2NewMap[NV] out of bounds.
#pragma omp parallel for
    for(comm_type i=0; i<NV; i++) {
        old2NewMap[i] = (comm_type) perm[i]; //Do explicit typecasts
    }
    //Cleanup:
    free(xadj); free(adjncy); free(adjwgt);
    free(perm); free(iperm);
    printf("Returning back from Metis\n");
}
#endif
|
SpatialMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialMaxUnpooling.c"
#else
/* Scatter one (batch element's) input plane into the zeroed output using the
 * stored max indices: output[k][ind[k][i][j]] = input[k][i][j].
 * Invalid indices are recorded (not thrown) inside the OpenMP loop and
 * reported with THError only after the parallel region ends, since raising
 * from inside a parallel region is not allowed. */
static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(scalar_t *input_p, scalar_t *output_p,
THIndex_t *ind_p,
int nslices,
int iwidth, int iheight,
int owidth, int oheight)
{
int k;
int has_error = 0;
THIndex_t error_index = 0;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
scalar_t *output_p_k = output_p + k*owidth*oheight;
scalar_t *input_p_k = input_p + k*iwidth*iheight;
THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;
int i, j;
THIndex_t maxp;
for(i = 0; i < iheight; i++)
{
for(j = 0; j < iwidth; j++)
{
maxp = ind_p_k[i*iwidth + j]; /* retrieve position of max */
if(maxp<0 || maxp>=owidth*oheight){
/* Record the bad index; last writer wins across threads. */
#pragma omp critical
{
has_error = 1;
error_index = maxp;
}
} else {
output_p_k[maxp] = input_p_k[i*iwidth + j]; /* update output */
}
}
}
}
if (has_error) {
THError("found an invalid max index %ld (output volumes are of size %dx%d)",
error_index, oheight, owidth);
}
}
/* Forward pass of spatial max-unpooling: resizes output to
 * (nbatch x) nslices x oheight x owidth, zeroes it, and scatters input
 * values to the positions recorded in `indices` (produced by max-pooling).
 * Accepts a 3D tensor or a 4D batch; batch mode loops the frame kernel
 * per sample. */
void THNN_(SpatialMaxUnpooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THIndexTensor *indices,
int owidth, int oheight)
{
int dimw = 2;
int dimh = 1;
int nbatch = 1;
int nslices;
int iheight;
int iwidth;
scalar_t *input_data;
scalar_t *output_data;
THIndex_t *indices_data;
AT_CHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input, but got sizes: ", input->sizes());
THNN_CHECK_SHAPE_INDICES(input, indices);
/* Batch mode shifts the spatial dimensions right by one. */
if (input->dim() == 4)
{
nbatch = input->size(0);
dimw++;
dimh++;
}
/* sizes */
nslices = input->size(dimh-1);
iheight = input->size(dimh);
iwidth = input->size(dimw);
/* get contiguous input and indices (new references; freed below) */
input = THTensor_(newContiguous)(input);
indices = THIndexTensor_(newContiguous)(indices);
/* resize output */
if (input->dim() == 3)
{
THTensor_(resize3d)(output, nslices, oheight, owidth);
THTensor_(zero)(output);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data,
indices_data,
nslices,
iwidth, iheight,
owidth, oheight);
}
else
{
int p;
THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
THTensor_(zero)(output);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
/* One frame-kernel invocation per batch sample. */
for (p = 0; p < nbatch; p++)
{
THNN_(SpatialMaxUnpooling_updateOutput_frame)(
input_data+p*nslices*iwidth*iheight,
output_data+p*nslices*owidth*oheight,
indices_data+p*nslices*iwidth*iheight,
nslices,
iwidth, iheight,
owidth, oheight);
}
}
/* cleanup: release the contiguous copies created above */
c10::raw::intrusive_ptr::decref(input);
THIndexTensor_(free)(indices);
}
/*
 * Max-unpooling backward pass for a single batch element.
 * For every input location (i, j) of every slice, routes the gradient from
 * the output position stored in ind_p back to gradInput at (i, j).
 *
 * Fix: the original raised THError directly inside the OpenMP parallel
 * region. Propagating an error (which does not return) out of a worker
 * thread in a parallel region is not allowed, so invalid indices are now
 * recorded under a critical section and reported once after the loop —
 * the same has_error pattern already used by
 * SpatialMaxUnpooling_updateOutput_frame above. The write through an
 * invalid index is also skipped instead of being executed unconditionally.
 */
static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(scalar_t *gradInput_p, scalar_t *gradOutput_p,
                                                             THIndex_t *ind_p,
                                                             int nslices,
                                                             int iwidth, int iheight,
                                                             int owidth, int oheight)
{
  int k;
  int has_error = 0;
  THIndex_t error_index = 0;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    scalar_t *gradInput_p_k = gradInput_p + k*iwidth*iheight;
    scalar_t *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;
    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j]; /* retrieve position of max */
        if(maxp < 0 || maxp >= owidth * oheight) {
#pragma omp critical
          {
            has_error = 1;
            error_index = maxp;
          }
        } else {
          gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */
        }
      }
    }
  }
  if (has_error) {
    /* cast: THIndex_t may be wider than long on some platforms; match %ld */
    THError("invalid max index %ld, owidth= %d, oheight= %d",
            (long)error_index, owidth, oheight);
  }
}
/*
 * Max-unpooling backward pass over a full (optionally batched) tensor.
 * Resizes gradInput to input's shape, zeroes it, validates that gradOutput's
 * spatial size matches (oheight, owidth), then dispatches one frame call per
 * batch element to scatter-gather the gradients through `indices`.
 *
 * Fix: the inconsistent-size error formatted gradOutput->size(...) — a
 * 64-bit value — with %d, which is undefined behavior in variadic
 * formatting. The sizes are now cast to long and printed with %ld.
 */
void THNN_(SpatialMaxUnpooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  /* dimension layout for the 3D case; bumped by one in batch mode */
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;
  THIndex_t *indices_data;
  THNN_CHECK_SHAPE_INDICES(input, indices);
  /* get contiguous gradOutput and indices (may allocate copies; freed below) */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  indices = THIndexTensor_(newContiguous)(indices);
  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }
  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);
  if(owidth!=gradOutput->size(dimw) || oheight!=gradOutput->size(dimh)){
    THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %ldx%ld",
            oheight, owidth,
            (long)gradOutput->size(dimh), (long)gradOutput->size(dimw));
  }
  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();
  indices_data = THIndexTensor_(data)(indices);
  /* backprop */
  if (input->dim() == 3)
  {
    THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                     indices_data,
                                                     nslices,
                                                     iwidth, iheight,
                                                     owidth, oheight);
  }
  else
  {
    int p;
    /* one frame call per batch element, offsetting into the flat buffers */
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight,
                                                       gradOutput_data+p*nslices*owidth*oheight,
                                                       indices_data+p*nslices*iwidth*iheight,
                                                       nslices,
                                                       iwidth, iheight,
                                                       owidth, oheight);
    }
  }
  /* cleanup: release the contiguous copies acquired above */
  c10::raw::intrusive_ptr::decref(gradOutput);
  THIndexTensor_(free)(indices);
}
#endif
|
asa.c | /***********************************************************************
* Adaptive Simulated Annealing (ASA)
* Lester Ingber <ingber@ingber.com>
* Copyright (c) 1987-2015 Lester Ingber. All Rights Reserved.
* ASA-LICENSE file has the license that must be included with ASA code.
***********************************************************************/
#define ASA_ID "/* $Id: asa.c,v 30.18 2015/01/27 22:43:13 ingber Exp ingber $ */"
#include "asa.h"
char exit_msg[160]; /* temp storage for exit messages */
/***********************************************************************
* asa
* This procedure implements the full ASA function optimization.
***********************************************************************/
#if HAVE_ANSI
double
asa (double (*user_cost_function)
(double *, double *, double *, double *, double *, ALLOC_INT *, int *,
int *, int *, USER_DEFINES *),
double (*user_random_generator) (LONG_INT *), LONG_INT * seed,
double *parameter_initial_final, double *parameter_minimum,
double *parameter_maximum, double *tangents, double *curvature,
ALLOC_INT * number_parameters, int *parameter_type,
int *valid_state_generated_flag, int *exit_status,
USER_DEFINES * OPTIONS)
#else
double
asa (user_cost_function,
user_random_generator,
seed,
parameter_initial_final,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type, valid_state_generated_flag, exit_status, OPTIONS)
double (*user_cost_function) ();
double (*user_random_generator) ();
LONG_INT *seed;
double *parameter_initial_final;
double *parameter_minimum;
double *parameter_maximum;
double *tangents;
double *curvature;
ALLOC_INT *number_parameters;
int *parameter_type;
int *valid_state_generated_flag;
int *exit_status;
USER_DEFINES *OPTIONS;
#endif /* HAVE_ANSI */
{
#if USER_REANNEAL_COST
#else
int immediate_flag; /* save Immediate_Exit */
#endif /* USER_REANNEAL_COST */
#if USER_INITIAL_COST_TEMP
#if USER_REANNEAL_COST
#else
int index_cost_constraint; /* index cost functions averaged */
#endif /* USER_REANNEAL_COST */
#else /* USER_INITIAL_COST_TEMP */
int index_cost_constraint; /* index cost functions averaged */
#endif /* USER_INITIAL_COST_TEMP */
int index_cost_repeat, /* test OPTIONS->Cost_Precision when =
OPTIONS->Maximum_Cost_Repeat */
tmp_var_int, tmp_var_int1, tmp_var_int2; /* temporary integers */
int generate_flg;
ALLOC_INT index_v, /* iteration index */
*start_sequence; /* initial OPTIONS->Sequential_Parameters
used if >= 0 */
double final_cost, /* best cost to return to user */
tmp_var_db, tmp_var_db1, tmp_var_db2; /* temporary doubles */
int *curvature_flag;
FILE *ptr_asa_out; /* file ptr to output file */
int ret1_flg;
/* The 3 states that are kept track of during the annealing process */
STATE *current_generated_state, *last_saved_state, *best_generated_state;
#if ASA_SAVE
FILE *ptr_save, *ptr_comm;
int asa_read;
char asa_save_comm[100];
#if ASA_SAVE_OPT
char read_option[80];
char read_if[4], read_FALSE[6], read_comm1[3], read_ASA_SAVE[9],
read_comm2[3];
int read_int;
#if INT_LONG
LONG_INT read_long;
#endif
double read_double;
FILE *ptr_save_opt;
#endif
#endif /* ASA_SAVE */
#if ASA_PIPE_FILE
FILE *ptr_asa_pipe;
#endif
#if ASA_EXIT_ANYTIME
FILE *ptr_exit_anytime;
#endif /* ASA_EXIT_ANYTIME */
int asa_exit_value;
int best_flag;
int fscanf_ret;
double xnumber_parameters[1];
/* The array of tangents (absolute value of the numerical derivatives),
and the maximum |tangent| of the array */
double *maximum_tangent;
/* ratio of acceptances to generated points - determines when to
test/reanneal */
double *accepted_to_generated_ratio;
/* temperature parameters */
double temperature_scale, *temperature_scale_parameters;
/* relative scalings of cost and parameters to temperature_scale */
double *temperature_scale_cost;
double *current_user_parameter_temp;
double *initial_user_parameter_temp;
double *current_cost_temperature;
double *initial_cost_temperature;
double log_new_temperature_ratio; /* current *temp = initial *temp *
exp(log_new_temperature_ratio) */
ALLOC_INT *index_exit_v; /* information for asa_exit */
/* counts of generated states and acceptances */
LONG_INT *index_parameter_generations;
LONG_INT *number_generated, *best_number_generated_saved;
LONG_INT *recent_number_generated, *number_accepted;
LONG_INT *recent_number_acceptances, *index_cost_acceptances;
LONG_INT *number_acceptances_saved, *best_number_accepted_saved;
/* Flag indicates that the parameters generated were
invalid according to the cost function validity criteria. */
LONG_INT *number_invalid_generated_states;
LONG_INT repeated_invalid_states;
#if ASA_QUEUE
int queue_new; /* flag to add new entry */
int *save_queue_flag; /* save valid_state_generated_flag */
LONG_INT queue; /* index of queue */
LONG_INT queue_v; /* index of parameters in queue */
LONG_INT save_queue_test; /* test if all parameters are present */
LONG_INT save_queue; /* last filled position in queue */
LONG_INT save_queue_indx; /* current position in queue */
double *save_queue_cost, *save_queue_param; /* saved states */
ALLOC_INT queue_size_tmp;
#endif /* ASA_QUEUE */
#if MULTI_MIN
int multi_index;
int multi_test, multi_test_cmp, multi_test_dim;
int *multi_sort;
double *multi_cost;
double **multi_params;
#endif /* MULTI_MIN */
#if ASA_PARALLEL
int EXIT_asa_parallel = 0;
LONG_INT tmp_var_lint;
LONG_INT *parallel_gen_ratio_block;
LONG_INT *parallel_sort;
LONG_INT i_prll, sort_index; /* count of parallel generated states */
STATE *gener_block_state;
int *generate_flg_par;
LONG_INT *number_invalid_generated_states_par;
LONG_INT *repeated_invalid_states_par;
double *tmp_var_db1_par;
double *tmp_var_db_par;
int *valid_state_generated_flag_par;
int valid_state_generated_flag_par_test;
#if ASA_QUEUE
int *queue_new_par;
LONG_INT *queue_v_par;
LONG_INT *save_queue_indx_par;
LONG_INT *save_queue_test_par;
LONG_INT *save_queue_par;
double *queue_par_cost;
int **save_queue_valid_state_flag_par;
double **save_queue_cost_par;
double **save_queue_param_par;
#endif /* ASA_QUEUE */
#endif /* ASA_PARALLEL */
/* used to index repeated and recursive calls to asa */
/* This assumes that multiple calls (>= 1) _or_ recursive
calls are being made to asa */
static int asa_open = FALSE;
static int number_asa_open = 0;
static int recursive_asa_open = 0;
/* initializations */
ret1_flg = 0;
generate_flg = 0;
if (generate_flg != 0)
generate_flg = 0;
fscanf_ret = 0; /* stop compiler warning */
if (fscanf_ret) {
;
}
if ((curvature_flag = (int *) calloc (1, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): curvature_flag");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((maximum_tangent = (double *) calloc (1, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): maximum_tangent");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((accepted_to_generated_ratio =
(double *) calloc (1, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): accepted_to_generated_ratio");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((temperature_scale_cost =
(double *) calloc (1, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): temperature_scale_cost");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((current_cost_temperature =
(double *) calloc (1, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): current_cost_temperature");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((initial_cost_temperature =
(double *) calloc (1, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): initial_cost_temperature");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((index_exit_v = (ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): index_exit_v");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((start_sequence = (ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): start_sequence");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((number_generated =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): number_generated");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((best_number_generated_saved =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): best_number_generated_saved");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((recent_number_generated =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): recent_number_generated");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((number_accepted =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): number_accepted");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((recent_number_acceptances =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): recent_number_acceptances");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((index_cost_acceptances =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): index_cost_acceptances");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((number_acceptances_saved =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): number_acceptances_saved");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((best_number_accepted_saved =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): best_number_accepted_saved");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((number_invalid_generated_states =
(ALLOC_INT *) calloc (1, sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): number_invalid_generated_states");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((current_generated_state =
(STATE *) calloc (1, sizeof (STATE))) == NULL) {
strcpy (exit_msg, "asa(): current_generated_state");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((last_saved_state = (STATE *) calloc (1, sizeof (STATE))) == NULL) {
strcpy (exit_msg, "asa(): last_saved_state");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((best_generated_state = (STATE *) calloc (1, sizeof (STATE))) == NULL) {
strcpy (exit_msg, "asa(): best_generated_state");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
#if ASA_PARALLEL
if ((gener_block_state =
(STATE *) calloc (OPTIONS->Gener_Block_Max, sizeof (STATE))) == NULL) {
strcpy (exit_msg, "asa(): gener_block_state");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
gener_block_state_qsort = gener_block_state;
if ((parallel_sort =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): parallel_sort");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((generate_flg_par =
(int *) calloc (OPTIONS->Gener_Block_Max, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): generate_flg_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((valid_state_generated_flag_par =
(int *) calloc (OPTIONS->Gener_Block_Max, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): valid_state_generated_flag_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((number_invalid_generated_states_par =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): number_invalid_generated_states_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((repeated_invalid_states_par =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): repeated_invalid_states_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((tmp_var_db1_par =
(double *) calloc (OPTIONS->Gener_Block_Max,
sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): tmp_var_db1_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((tmp_var_db_par =
(double *) calloc (OPTIONS->Gener_Block_Max,
sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): tmp_var_db_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((parallel_gen_ratio_block =
(LONG_INT *) calloc (OPTIONS->Gener_Mov_Avr,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): parallel_gen_ratio_block");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
for (i_prll = 0; i_prll < OPTIONS->Gener_Mov_Avr; ++i_prll) {
parallel_gen_ratio_block[i_prll] = OPTIONS->Gener_Block;
}
#endif /* ASA_PARALLEL */
fscanf_ret = 0;
/* set default */
ptr_asa_out = (FILE *) NULL;
OPTIONS->Immediate_Exit = FALSE;
if (asa_open == FALSE) {
asa_open = TRUE;
++number_asa_open;
#if ASA_PRINT
if (number_asa_open == 1) {
/* open the output file */
#if USER_ASA_OUT
if (!strcmp (OPTIONS->Asa_Out_File, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
#if ASA_SAVE
ptr_asa_out = fopen (OPTIONS->Asa_Out_File, "a");
#else
ptr_asa_out = fopen (OPTIONS->Asa_Out_File, "w");
#endif
}
#else /* USER_ASA_OUT */
if (!strcmp (ASA_OUT, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
#if ASA_SAVE
ptr_asa_out = fopen (ASA_OUT, "a");
#else
ptr_asa_out = fopen (ASA_OUT, "w");
#endif
}
#endif /* USER_ASA_OUT */
} else {
#if USER_ASA_OUT
if (!strcmp (OPTIONS->Asa_Out_File, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (OPTIONS->Asa_Out_File, "a");
}
#else
if (!strcmp (ASA_OUT, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (ASA_OUT, "a");
}
#endif
fprintf (ptr_asa_out, "\n\n\t\t number_asa_open = %d\n",
number_asa_open);
fflush (ptr_asa_out);
}
#endif /* ASA_PRINT */
} else {
++recursive_asa_open;
#if ASA_PRINT
if (recursive_asa_open == 1) {
/* open the output file */
#if ASA_SAVE
#if USER_ASA_OUT
if (!strcmp (OPTIONS->Asa_Out_File, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (OPTIONS->Asa_Out_File, "a");
}
#else
if (!strcmp (ASA_OUT, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (ASA_OUT, "a");
}
#endif
#else /* ASA_SAVE */
#if USER_ASA_OUT
if (!strcmp (OPTIONS->Asa_Out_File, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (OPTIONS->Asa_Out_File, "w");
}
#else
if (!strcmp (ASA_OUT, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (ASA_OUT, "w");
}
#endif
#endif /* ASA_SAVE */
} else {
#if USER_ASA_OUT
if (!strcmp (OPTIONS->Asa_Out_File, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (OPTIONS->Asa_Out_File, "a");
}
#else
if (!strcmp (ASA_OUT, "STDOUT")) {
#if INCL_STDOUT
ptr_asa_out = stdout;
#endif /* INCL_STDOUT */
} else {
ptr_asa_out = fopen (ASA_OUT, "a");
}
#endif
fprintf (ptr_asa_out, "\n\n\t\t recursive_asa_open = %d\n",
recursive_asa_open);
}
#endif /* ASA_PRINT */
}
#if ASA_PIPE_FILE
ptr_asa_pipe = fopen ("asa_pipe", "a");
fprintf (ptr_asa_pipe, "%s", "%generate");
fprintf (ptr_asa_pipe, "\t%s", "accept");
fprintf (ptr_asa_pipe, "\t%s", "best_cost");
VFOR (index_v)
#if INT_ALLOC
fprintf (ptr_asa_pipe, "\t%s-%d", "best_param", index_v);
#else
#if INT_LONG
fprintf (ptr_asa_pipe, "\t%s-%ld", "best_param", index_v);
#else
fprintf (ptr_asa_pipe, "\t%s-%d", "best_param", index_v);
#endif
#endif
fprintf (ptr_asa_pipe, "\t%s", "curr_cost");
VFOR (index_v)
#if INT_ALLOC
fprintf (ptr_asa_pipe, "\t%s-%d", "curr_param", index_v);
#else
#if INT_LONG
fprintf (ptr_asa_pipe, "\t%s-%ld", "curr_param", index_v);
#else
fprintf (ptr_asa_pipe, "\t%s-%d", "curr_param", index_v);
#endif
#endif
fprintf (ptr_asa_pipe, "\t%s", "cost_temp");
VFOR (index_v)
#if INT_ALLOC
fprintf (ptr_asa_pipe, "\t%s-%d", "param_temp", index_v);
#else
#if INT_LONG
fprintf (ptr_asa_pipe, "\t%s-%ld", "param_temp", index_v);
#else
fprintf (ptr_asa_pipe, "\t%s-%d", "param_temp", index_v);
#endif
#endif
fprintf (ptr_asa_pipe, "\t%s", "last_cost");
fprintf (ptr_asa_pipe, "\n");
fflush (ptr_asa_pipe);
#endif /* ASA_PIPE_FILE */
#if ASA_EXIT_ANYTIME
if ((ptr_exit_anytime = fopen ("asa_exit_anytime", "w")) == NULL) {
fprintf (ptr_exit_anytime,
"force IMMEDIATE_EXIT by removing this file if ASA_EXIT_ANYTIME is TRUE\n");
fclose (ptr_exit_anytime);
} else {
fclose (ptr_exit_anytime);
}
#endif /* ASA_EXIT_ANYTIME */
#if ASA_PRINT
/* print header information as defined by user */
print_asa_options (ptr_asa_out, OPTIONS);
#if TIME_CALC
/* print starting time */
print_time ("start_asa", ptr_asa_out);
#endif
fflush (ptr_asa_out);
#endif /* ASA_PRINT */
/* set indices and counts to 0 */
*best_number_generated_saved =
*number_generated =
*recent_number_generated = *recent_number_acceptances = 0;
*index_cost_acceptances =
*best_number_accepted_saved =
*number_accepted = *number_acceptances_saved = 0;
index_cost_repeat = 0;
OPTIONS->N_Accepted = *number_accepted;
OPTIONS->N_Generated = *number_generated;
#if ASA_SAMPLE
OPTIONS->N_Generated = 0;
OPTIONS->Average_Weights = 1.0;
#endif
/* do not calculate curvatures initially */
*curvature_flag = FALSE;
/* allocate storage for all parameters */
if ((current_generated_state->parameter =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): current_generated_state->parameter");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
if ((last_saved_state->parameter =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): last_saved_state->parameter");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
if ((best_generated_state->parameter =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): best_generated_state->parameter");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
#if ASA_PARALLEL
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
if ((gener_block_state[i_prll].parameter =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): gener_block_state[i_prll].parameter");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
} else {
;
}
}
OPTIONS->parallel_id = -1;
#endif /* ASA_PARALLEL */
OPTIONS->Best_Cost = &(best_generated_state->cost);
OPTIONS->Best_Parameters = best_generated_state->parameter;
OPTIONS->Last_Cost = &(last_saved_state->cost);
OPTIONS->Last_Parameters = last_saved_state->parameter;
if ((initial_user_parameter_temp =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): initial_user_parameter_temp");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
if ((index_parameter_generations =
(ALLOC_INT *) calloc (*number_parameters,
sizeof (ALLOC_INT))) == NULL) {
strcpy (exit_msg, "asa(): index_parameter_generations");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
/* set all temperatures */
if ((current_user_parameter_temp =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): current_user_parameter_temp");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
#if USER_INITIAL_PARAMETERS_TEMPS
VFOR (index_v)
current_user_parameter_temp[index_v] =
initial_user_parameter_temp[index_v] =
OPTIONS->User_Parameter_Temperature[index_v];
#else
VFOR (index_v)
current_user_parameter_temp[index_v] =
initial_user_parameter_temp[index_v] =
OPTIONS->Initial_Parameter_Temperature;
#endif
if ((temperature_scale_parameters =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): temperature_scale_parameters");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
#if ASA_QUEUE
if (OPTIONS->Queue_Size > 0) {
queue_size_tmp = OPTIONS->Queue_Size;
} else {
queue_size_tmp = 1;
}
if ((save_queue_flag =
(int *) calloc (queue_size_tmp, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_flag");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_cost =
(double *) calloc (queue_size_tmp, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_cost");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_param =
(double *) calloc ((*number_parameters) * queue_size_tmp,
sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_param");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
#if ASA_PARALLEL
if (OPTIONS->Queue_Size > 0) {
queue_size_tmp = OPTIONS->Queue_Size;
} else {
queue_size_tmp = 1;
}
if ((queue_par_cost =
(double *) calloc (OPTIONS->Gener_Block_Max,
sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): queue_par_cost");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((queue_new_par =
(int *) calloc (OPTIONS->Gener_Block_Max, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): queue_new_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((queue_v_par =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): queue_v_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_indx_par =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_indx_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_test_par =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_test_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_par =
(LONG_INT *) calloc (OPTIONS->Gener_Block_Max,
sizeof (LONG_INT))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_valid_state_flag_par =
(int **) calloc (OPTIONS->Gener_Block_Max, sizeof (int *))) == NULL) {
strcpy (exit_msg, "asa(): *save_queue_valid_state_flag_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_cost_par =
(double **) calloc (OPTIONS->Gener_Block_Max,
sizeof (double *))) == NULL) {
strcpy (exit_msg, "asa(): *save_queue_cost_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_param_par =
(double **) calloc (OPTIONS->Gener_Block_Max,
sizeof (double *))) == NULL) {
strcpy (exit_msg, "asa(): *save_queue_param_par");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
if ((save_queue_valid_state_flag_par[i_prll] =
(int *) calloc (queue_size_tmp, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_valid_state_flag_par[i_prll]");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_cost_par[i_prll] =
(double *) calloc (queue_size_tmp, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_cost_par[i_prll]");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
if ((save_queue_param_par[i_prll] =
(double *) calloc ((*number_parameters) * queue_size_tmp,
sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): save_queue_param_par[i_prll]");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
return (-1);
}
}
#endif /* ASA_PARALLEL */
#endif /* ASA_QUEUE */
#if MULTI_MIN
if ((multi_cost =
(double *) calloc (OPTIONS->Multi_Number + 1,
sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): *multi_cost");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
multi_cost_qsort = multi_cost;
if ((multi_sort =
(int *) calloc (OPTIONS->Multi_Number + 1, sizeof (int))) == NULL) {
strcpy (exit_msg, "asa(): *multi_sort");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
if ((multi_params =
(double **) calloc (OPTIONS->Multi_Number + 1,
sizeof (double *))) == NULL) {
strcpy (exit_msg, "asa(): *multi_params");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
for (multi_index = 0; multi_index <= OPTIONS->Multi_Number; ++multi_index) {
if ((multi_params[multi_index] =
(double *) calloc (*number_parameters, sizeof (double))) == NULL) {
strcpy (exit_msg, "asa(): multi_params[multi_index]");
Exit_ASA (exit_msg);
*exit_status = CALLOC_FAILED;
ret1_flg = 1;
goto RET1_asa;
}
}
#endif /* MULTI_MIN */
RET1_asa:
if (ret1_flg == 1) {
#if ASA_PIPE_FILE
fclose (ptr_asa_pipe);
#endif
free (accepted_to_generated_ratio);
free (best_generated_state);
free (best_number_accepted_saved);
free (best_number_generated_saved);
free (current_cost_temperature);
free (current_generated_state);
free (curvature_flag);
free (index_cost_acceptances);
free (index_exit_v);
free (initial_cost_temperature);
free (last_saved_state);
free (maximum_tangent);
free (number_acceptances_saved);
free (number_accepted);
free (number_generated);
free (number_invalid_generated_states);
free (recent_number_acceptances);
free (recent_number_generated);
free (start_sequence);
free (temperature_scale_cost);
return (-1);
}
#if USER_INITIAL_COST_TEMP
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Curr = OPTIONS->Cost_Temp_Init =
#endif
*initial_cost_temperature = *current_cost_temperature =
OPTIONS->User_Cost_Temperature[0];
#endif
/* set parameters to the initial parameter values */
VFOR (index_v)
last_saved_state->parameter[index_v] =
current_generated_state->parameter[index_v] =
parameter_initial_final[index_v];
#if USER_ACCEPTANCE_TEST
OPTIONS->Random_Seed = seed;
OPTIONS->Random_Seed[0] = *seed;
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
#if ASA_PRINT
#if INT_LONG
fprintf (ptr_asa_out, "Initial Random Seed = %ld\n\n", *seed);
#else
fprintf (ptr_asa_out, "Initial Random Seed = %d\n\n", *seed);
#endif
#endif /* ASA_PRINT */
/* save initial user value of OPTIONS->Sequential_Parameters */
*start_sequence = OPTIONS->Sequential_Parameters;
#if ASA_PRINT
#if INT_ALLOC
fprintf (ptr_asa_out, "*number_parameters = %d\n\n", *number_parameters);
#else
#if INT_LONG
fprintf (ptr_asa_out, "*number_parameters = %ld\n\n", *number_parameters);
#else
fprintf (ptr_asa_out, "*number_parameters = %d\n\n", *number_parameters);
#endif
#endif
/* print the min, max, current values, and types of parameters */
fprintf (ptr_asa_out,
"index_v parameter_minimum parameter_maximum parameter_value parameter_type \n");
#if ASA_PRINT_INTERMED
VFOR (index_v) fprintf (ptr_asa_out,
#if INT_ALLOC
" %-8d %-*.*g \t\t %-*.*g \t %-*.*g %-7d\n",
#else
#if INT_LONG
" %-8ld %-*.*g \t\t %-*.*g \t %-*.*g %-7d\n",
#else
" %-8d %-*.*g \t\t %-*.*g \t %-*.*g %-7d\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION, parameter_minimum[index_v],
G_FIELD, G_PRECISION, parameter_maximum[index_v],
G_FIELD, G_PRECISION,
current_generated_state->parameter[index_v],
parameter_type[index_v]);
fprintf (ptr_asa_out, "\n\n");
#endif /* ASA_PRINT_INTERMED */
/* Print out user-defined OPTIONS */
#if DELTA_PARAMETERS
VFOR (index_v) fprintf (ptr_asa_out,
#if INT_ALLOC
"OPTIONS->User_Delta_Parameter[%d] = %*.*g\n",
#else
#if INT_LONG
"OPTIONS->User_Delta_Parameter[%ld] = %*.*g\n",
#else
"OPTIONS->User_Delta_Parameter[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION,
OPTIONS->User_Delta_Parameter[index_v]);
fprintf (ptr_asa_out, "\n");
#endif /* DELTA_PARAMETERS */
#if QUENCH_PARAMETERS
VFOR (index_v) fprintf (ptr_asa_out,
#if INT_ALLOC
"OPTIONS->User_Quench_Param_Scale[%d] = %*.*g\n",
#else
#if INT_LONG
"OPTIONS->User_Quench_Param_Scale[%ld] = %*.*g\n",
#else
"OPTIONS->User_Quench_Param_Scale[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION,
OPTIONS->User_Quench_Param_Scale[index_v]);
#endif /* QUENCH_PARAMETERS */
#if QUENCH_COST
fprintf (ptr_asa_out,
"\nOPTIONS->User_Quench_Cost_Scale = %*.*g\n\n",
G_FIELD, G_PRECISION, OPTIONS->User_Quench_Cost_Scale[0]);
#endif /* QUENCH_COST */
#if USER_INITIAL_PARAMETERS_TEMPS
VFOR (index_v) fprintf (ptr_asa_out,
#if INT_ALLOC
"OPTIONS->User_Parameter_Temperature[%d] = %*.*g\n",
#else
#if INT_LONG
"OPTIONS->User_Parameter_Temperature[%ld] = %*.*g\n",
#else
"OPTIONS->User_Parameter_Temperature[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION,
initial_user_parameter_temp[index_v]);
#endif /* USER_INITIAL_PARAMETERS_TEMPS */
#if RATIO_TEMPERATURE_SCALES
VFOR (index_v) fprintf (ptr_asa_out,
#if INT_ALLOC
"OPTIONS->User_Temperature_Ratio[%d] = %*.*g\n",
#else
#if INT_LONG
"OPTIONS->User_Temperature_Ratio[%ld] = %*.*g\n",
#else
"OPTIONS->User_Temperature_Ratio[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION,
OPTIONS->User_Temperature_Ratio[index_v]);
#endif /* RATIO_TEMPERATURE_SCALES */
#if USER_INITIAL_COST_TEMP
fprintf (ptr_asa_out,
"OPTIONS->User_Cost_Temperature[0] = %*.*g\n",
G_FIELD, G_PRECISION, *initial_cost_temperature);
#endif /* USER_INITIAL_COST_TEMP */
fflush (ptr_asa_out);
#endif /* ASA_PRINT */
#if MULTI_MIN
#if ASA_PRINT
fprintf (ptr_asa_out, "\n");
fprintf (ptr_asa_out, "Multi_Number = %d\n", OPTIONS->Multi_Number);
fprintf (ptr_asa_out, "Multi_Specify = %d\n", OPTIONS->Multi_Specify);
#if ASA_RESOLUTION
#else
VFOR (index_v) {
fprintf (ptr_asa_out,
#if INT_ALLOC
"Multi_Grid[%d] = %*.*g\n",
#else
#if INT_LONG
"Multi_Grid[%ld] = %*.*g\n",
#else
"Multi_Grid[%d] = %*.*g\n",
#endif
#endif
index_v, G_FIELD, G_PRECISION, OPTIONS->Multi_Grid[index_v]);
}
#endif /* ASA_RESOLUTION */
fprintf (ptr_asa_out, "\n");
fflush (ptr_asa_out);
#endif /* ASA_PRINT */
#endif /* MULTI_MIN */
#if ASA_PARALLEL
#if ASA_PRINT
fprintf (ptr_asa_out,
#if INT_LONG
"Initial ASA_PARALLEL OPTIONS->\n\t Gener_Block = %ld\n \t Gener_Block_Max = %ld\n \t Gener_Mov_Avr= %d\n\n",
#else
"ASA_PARALLEL OPTIONS->\n\t Gener_Block = %d\n \t Gener_Block_Max = %d\n \t Gener_Mov_Avr= %d\n\n",
#endif
OPTIONS->Gener_Block, OPTIONS->Gener_Block_Max,
OPTIONS->Gener_Mov_Avr);
#endif
#endif /* ASA_PARALLEL */
#if ASA_SAMPLE
#if ASA_PRINT
fprintf (ptr_asa_out, "OPTIONS->Limit_Weights = %*.*g\n\n",
G_FIELD, G_PRECISION, OPTIONS->Limit_Weights);
#endif
#endif
if (OPTIONS->Asa_Recursive_Level > asa_recursive_max)
asa_recursive_max = OPTIONS->Asa_Recursive_Level;
#if ASA_SAVE
if (OPTIONS->Asa_Recursive_Level > 0)
sprintf (asa_save_comm, "asa_save_%d", OPTIONS->Asa_Recursive_Level);
else
sprintf (asa_save_comm, "asa_save");
if ((ptr_save = fopen (asa_save_comm, "r")) == NULL) {
asa_read = FALSE;
} else {
#if ASA_PRINT
fprintf (ptr_asa_out, "\n\n\trestart after ASA_SAVE\n\n");
#endif
fclose (ptr_save);
asa_read = TRUE;
/* give some value to avoid any problems with other OPTIONS */
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Curr = OPTIONS->Cost_Temp_Init =
#endif
current_generated_state->cost
= *initial_cost_temperature = *current_cost_temperature = 3.1416;
}
#endif
tmp_var_int = cost_function_test (current_generated_state->cost,
current_generated_state->parameter,
parameter_minimum,
parameter_maximum, number_parameters,
xnumber_parameters);
/* compute temperature scales */
tmp_var_db1 = -F_LOG ((OPTIONS->Temperature_Ratio_Scale));
tmp_var_db2 = F_LOG (OPTIONS->Temperature_Anneal_Scale);
temperature_scale =
tmp_var_db1 * F_EXP (-tmp_var_db2 / *xnumber_parameters);
/* set here in case not used */
tmp_var_db = ZERO;
#if QUENCH_PARAMETERS
#if RATIO_TEMPERATURE_SCALES
VFOR (index_v) temperature_scale_parameters[index_v] = tmp_var_db1 * F_EXP
#if QUENCH_PARAMETERS_SCALE
(-(tmp_var_db2 * OPTIONS->User_Quench_Param_Scale[index_v])
#else
(-(tmp_var_db2)
#endif
/ *xnumber_parameters)
* OPTIONS->User_Temperature_Ratio[index_v];
#else
VFOR (index_v) temperature_scale_parameters[index_v] = tmp_var_db1 * F_EXP
#if QUENCH_PARAMETERS_SCALE
(-(tmp_var_db2 * OPTIONS->User_Quench_Param_Scale[index_v])
#else
(-(tmp_var_db2)
#endif
/ *xnumber_parameters);
#endif /* RATIO_TEMPERATURE_SCALES */
#else /* QUENCH_PARAMETERS */
#if RATIO_TEMPERATURE_SCALES
VFOR (index_v)
temperature_scale_parameters[index_v] =
tmp_var_db1 * F_EXP (-(tmp_var_db2) / *xnumber_parameters)
* OPTIONS->User_Temperature_Ratio[index_v];
#else
VFOR (index_v)
temperature_scale_parameters[index_v] =
tmp_var_db1 * F_EXP (-(tmp_var_db2) / *xnumber_parameters);
#endif /* RATIO_TEMPERATURE_SCALES */
#endif /* QUENCH_PARAMETERS */
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Scale =
#endif
*temperature_scale_cost =
#if QUENCH_COST
#if QUENCH_COST_SCALE
tmp_var_db1 * F_EXP (-(tmp_var_db2 * OPTIONS->User_Quench_Cost_Scale[0])
#else
tmp_var_db1 * F_EXP (-(tmp_var_db2)
#endif
/ *xnumber_parameters) *
OPTIONS->Cost_Parameter_Scale_Ratio;
#else /* QUENCH_COST */
tmp_var_db1 * F_EXP (-(tmp_var_db2)
/ *xnumber_parameters) *
OPTIONS->Cost_Parameter_Scale_Ratio;
#endif /* QUENCH_COST */
/* set the initial index of parameter generations to 1 */
VFOR (index_v) index_parameter_generations[index_v] = 1;
/* test user-defined options before calling cost function */
tmp_var_int = asa_test_asa_options (seed,
parameter_initial_final,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, ptr_asa_out, OPTIONS);
if (tmp_var_int > 0) {
#if ASA_PRINT
fprintf (ptr_asa_out, "total number invalid OPTIONS = %d\n", tmp_var_int);
fflush (ptr_asa_out);
#endif
*exit_status = INVALID_USER_INPUT;
goto EXIT_asa;
}
#if USER_INITIAL_COST_TEMP
#else
#if ASA_SAVE
if (asa_read == TRUE)
OPTIONS->Number_Cost_Samples = 1;
#endif
/* calculate the average cost over samplings of the cost function */
if (OPTIONS->Number_Cost_Samples < -1) {
tmp_var_db1 = ZERO;
tmp_var_db2 = ZERO;
tmp_var_int = -OPTIONS->Number_Cost_Samples;
} else {
tmp_var_db1 = ZERO;
tmp_var_int = OPTIONS->Number_Cost_Samples;
}
OPTIONS->Locate_Cost = 0; /* initial cost temp */
for (index_cost_constraint = 0;
index_cost_constraint < tmp_var_int; ++index_cost_constraint) {
*number_invalid_generated_states = 0;
repeated_invalid_states = 0;
OPTIONS->Sequential_Parameters = *start_sequence - 1;
do {
#if ASA_EXIT_ANYTIME
if ((ptr_exit_anytime = fopen ("asa_exit_anytime", "r")) == NULL) {
*exit_status = IMMEDIATE_EXIT;
goto EXIT_asa;
} else {
fclose (ptr_exit_anytime);
}
#endif /* ASA_EXIT_ANYTIME */
++(*number_invalid_generated_states);
generate_flg = generate_new_state (user_random_generator,
seed,
parameter_minimum,
parameter_maximum,
current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp,
temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
current_generated_state,
last_saved_state, OPTIONS);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
tmp_var_db =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if (cost_function_test
(tmp_var_db, current_generated_state->parameter,
parameter_minimum, parameter_maximum, number_parameters,
xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
++repeated_invalid_states;
if (repeated_invalid_states > OPTIONS->Limit_Invalid_Generated_States) {
*exit_status = TOO_MANY_INVALID_STATES;
goto EXIT_asa;
}
}
while (*valid_state_generated_flag == FALSE);
--(*number_invalid_generated_states);
if (OPTIONS->Number_Cost_Samples < -1) {
tmp_var_db1 += tmp_var_db;
tmp_var_db2 += (tmp_var_db * tmp_var_db);
} else {
tmp_var_db1 += fabs (tmp_var_db);
}
}
if (OPTIONS->Number_Cost_Samples < -1) {
tmp_var_db1 /= (double) tmp_var_int;
tmp_var_db2 /= (double) tmp_var_int;
tmp_var_db = sqrt (fabs ((tmp_var_db2 - tmp_var_db1 * tmp_var_db1)
* ((double) tmp_var_int
/ ((double) tmp_var_int - ONE))))
+ (double) EPS_DOUBLE;
} else {
tmp_var_db = tmp_var_db1 / (double) tmp_var_int;
}
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Curr = OPTIONS->Cost_Temp_Init =
#endif
*initial_cost_temperature = *current_cost_temperature = tmp_var_db;
if (fabs (*initial_cost_temperature) <= SMALL_FLOAT) {
*initial_cost_temperature = *current_cost_temperature = 2.718;
#if ASA_PRINT
fprintf (ptr_asa_out,
"*** invalid too small cost temp = %g, set to = %g ***\n",
tmp_var_db, *initial_cost_temperature);
fflush (ptr_asa_out);
#endif
}
#endif /* USER_INITIAL_COST_TEMP */
/* set all parameters to the initial parameter values */
VFOR (index_v)
best_generated_state->parameter[index_v] =
last_saved_state->parameter[index_v] =
current_generated_state->parameter[index_v] =
parameter_initial_final[index_v];
OPTIONS->Locate_Cost = 1; /* initial cost value */
/* if using user's initial parameters */
if (OPTIONS->User_Initial_Parameters == TRUE) {
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
#if ASA_SAVE
if (asa_read == FALSE)
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if (cost_function_test
(current_generated_state->cost, current_generated_state->parameter,
parameter_minimum, parameter_maximum, number_parameters,
xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
#if ASA_PRINT
if (*valid_state_generated_flag == FALSE)
fprintf (ptr_asa_out,
"user's initial parameters generated FALSE *valid_state_generated_flag\n");
#endif
} else {
/* let asa generate valid initial parameters */
repeated_invalid_states = 0;
OPTIONS->Sequential_Parameters = *start_sequence - 1;
do {
#if ASA_EXIT_ANYTIME
if ((ptr_exit_anytime = fopen ("asa_exit_anytime", "r")) == NULL) {
*exit_status = IMMEDIATE_EXIT;
goto EXIT_asa;
} else {
fclose (ptr_exit_anytime);
}
#endif /* ASA_EXIT_ANYTIME */
++(*number_invalid_generated_states);
generate_flg = generate_new_state (user_random_generator,
seed,
parameter_minimum,
parameter_maximum,
current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp,
temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
current_generated_state,
last_saved_state, OPTIONS);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if (cost_function_test
(current_generated_state->cost,
current_generated_state->parameter, parameter_minimum,
parameter_maximum, number_parameters, xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
++repeated_invalid_states;
if (repeated_invalid_states > OPTIONS->Limit_Invalid_Generated_States) {
*exit_status = TOO_MANY_INVALID_STATES;
goto EXIT_asa;
}
}
while (*valid_state_generated_flag == FALSE);
--(*number_invalid_generated_states);
} /* OPTIONS->User_Initial_Parameters */
/* set all states to the last one generated */
VFOR (index_v) {
#if DROPPED_PARAMETERS
/* ignore parameters that have too small a range */
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
#endif
best_generated_state->parameter[index_v] =
last_saved_state->parameter[index_v] =
current_generated_state->parameter[index_v];
}
/* set all costs to the last one generated */
best_generated_state->cost = last_saved_state->cost =
current_generated_state->cost;
*accepted_to_generated_ratio = ONE;
/* do not calculate curvatures initially */
*curvature_flag = FALSE;
#if ASA_PRINT
fprintf (ptr_asa_out,
"temperature_scale = %*.*g\n",
G_FIELD, G_PRECISION, temperature_scale);
#if RATIO_TEMPERATURE_SCALES
#if ASA_PRINT_INTERMED
VFOR (index_v) {
fprintf (ptr_asa_out,
#if INT_ALLOC
"temperature_scale_parameters[%d] = %*.*g\n",
#else
#if INT_LONG
"temperature_scale_parameters[%ld] = %*.*g\n",
#else
"temperature_scale_parameters[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION, temperature_scale_parameters[index_v]);
}
#endif
#else
fprintf (ptr_asa_out,
"temperature_scale_parameters[0] = %*.*g\n",
G_FIELD, G_PRECISION, temperature_scale_parameters[0]);
#endif /* RATIO_TEMPERATURE_SCALES */
fprintf (ptr_asa_out,
"*temperature_scale_cost = %*.*g\n",
G_FIELD, G_PRECISION, *temperature_scale_cost);
fprintf (ptr_asa_out, "\n\n");
#if ASA_PRINT_INTERMED
print_state (parameter_minimum,
parameter_maximum,
tangents,
curvature,
current_cost_temperature,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
curvature_flag,
number_accepted,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
last_saved_state, best_generated_state, ptr_asa_out, OPTIONS);
#endif
fprintf (ptr_asa_out, "\n");
fflush (ptr_asa_out);
#endif
#if ASA_SAMPLE
#if ASA_PRINT
fprintf (ptr_asa_out,
":SAMPLE: n_accept cost cost_temp bias_accept aver_weight\n");
fprintf (ptr_asa_out,
":SAMPLE: index param[] temp[] bias_gener[] range[]\n");
#endif
#endif
/* reset the current cost and the number of generations performed */
*number_invalid_generated_states = 0;
*best_number_generated_saved =
*number_generated = *recent_number_generated = 0;
OPTIONS->N_Generated = *number_generated;
VFOR (index_v) {
/* ignore parameters that have too small a range */
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
index_parameter_generations[index_v] = 1;
}
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = FALSE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
#if ASA_QUEUE
#if ASA_PRINT
#if INT_ALLOC
fprintf (ptr_asa_out, "OPTIONS->Queue_Size = %d\n", OPTIONS->Queue_Size);
#else
#if INT_LONG
fprintf (ptr_asa_out, "OPTIONS->Queue_Size = %ld\n", OPTIONS->Queue_Size);
#else
fprintf (ptr_asa_out, "OPTIONS->Queue_Size = %d\n", OPTIONS->Queue_Size);
#endif
#endif
VFOR (index_v) {
fprintf (ptr_asa_out,
#if INT_ALLOC
"Queue_Resolution[%d] = %*.*g\n",
#else
#if INT_LONG
"Queue_Resolution[%ld] = %*.*g\n",
#else
"Queue_Resolution[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION, OPTIONS->Queue_Resolution[index_v]);
}
#endif /* ASA_PRINT */
/* fill arrays to check allocated memory */
for (queue = 0; queue < (LONG_INT) queue_size_tmp; ++queue) {
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
continue;
}
queue_v = index_v + queue * (LONG_INT) (*number_parameters);
save_queue_param[queue_v] = current_generated_state->parameter[index_v];
}
save_queue_cost[queue] = current_generated_state->cost;
save_queue_flag[queue] = *valid_state_generated_flag;
}
save_queue = save_queue_indx = 0;
#if ASA_PARALLEL
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
for (queue = 0; queue < (LONG_INT) queue_size_tmp; ++queue) {
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
continue;
}
queue_v_par[i_prll] =
index_v + queue * (LONG_INT) (*number_parameters);
save_queue_param_par[i_prll][queue_v_par[i_prll]] =
current_generated_state->parameter[index_v];
}
save_queue_cost_par[i_prll][queue] = current_generated_state->cost;
save_queue_valid_state_flag_par[i_prll][queue] =
*valid_state_generated_flag;
}
save_queue_par[i_prll] = save_queue_indx_par[i_prll] = 0;
}
#endif /* ASA_PARALLEL */
#endif /* ASA_QUEUE */
#if ASA_RESOLUTION
#if ASA_PRINT
VFOR (index_v) {
fprintf (ptr_asa_out,
#if INT_ALLOC
"Coarse_Resolution[%d] = %*.*g\n",
#else
#if INT_LONG
"Coarse_Resolution[%ld] = %*.*g\n",
#else
"Coarse_Resolution[%d] = %*.*g\n",
#endif
#endif
index_v,
G_FIELD, G_PRECISION, OPTIONS->Coarse_Resolution[index_v]);
}
#endif /* ASA_PRINT */
#endif /* ASA_RESOLUTION */
#if MULTI_MIN
multi_sort[OPTIONS->Multi_Number] = OPTIONS->Multi_Number;
multi_cost[OPTIONS->Multi_Number] = current_generated_state->cost;
VFOR (index_v) {
multi_params[OPTIONS->Multi_Number][index_v] =
current_generated_state->parameter[index_v];
}
for (multi_index = 0; multi_index < OPTIONS->Multi_Number; ++multi_index) {
multi_sort[multi_index] = multi_index;
multi_cost[multi_index] = OPTIONS->Multi_Cost[multi_index] =
current_generated_state->cost;
VFOR (index_v) {
multi_params[multi_index][index_v] =
OPTIONS->Multi_Params[multi_index][index_v] =
current_generated_state->parameter[index_v];
}
}
#endif /* MULTI_MIN */
/* this test is after MULTI_MIN so that params are not all just set to 0 */
if (*initial_cost_temperature < (double) EPS_DOUBLE) {
#if ASA_PRINT
fprintf (ptr_asa_out, "*initial_cost_temperature (= %g) < EPS_DOUBLE\n",
*initial_cost_temperature);
fflush (ptr_asa_out);
#endif
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
OPTIONS->Sequential_Parameters = *start_sequence - 1;
/* MAIN ANNEALING LOOP */
while (((*number_accepted <= OPTIONS->Limit_Acceptances)
|| (OPTIONS->Limit_Acceptances == 0))
&& ((*number_generated <= OPTIONS->Limit_Generated)
|| (OPTIONS->Limit_Generated == 0))) {
tmp_var_db1 = -F_LOG ((OPTIONS->Temperature_Ratio_Scale));
/* compute temperature scales */
tmp_var_db2 = F_LOG (OPTIONS->Temperature_Anneal_Scale);
temperature_scale = tmp_var_db1 *
F_EXP (-tmp_var_db2 / *xnumber_parameters);
#if QUENCH_PARAMETERS
#if RATIO_TEMPERATURE_SCALES
VFOR (index_v)
temperature_scale_parameters[index_v] = tmp_var_db1 * F_EXP
#if QUENCH_PARAMETERS_SCALE
(-(tmp_var_db2 * OPTIONS->User_Quench_Param_Scale[index_v])
#else
(-(tmp_var_db2)
#endif
/ *xnumber_parameters)
* OPTIONS->User_Temperature_Ratio[index_v];
#else
VFOR (index_v)
temperature_scale_parameters[index_v] = tmp_var_db1 * F_EXP
#if QUENCH_PARAMETERS_SCALE
(-(tmp_var_db2 * OPTIONS->User_Quench_Param_Scale[index_v])
#else
(-(tmp_var_db2)
#endif
/ *xnumber_parameters);
#endif /* RATIO_TEMPERATURE_SCALES */
#else /* QUENCH_PARAMETERS */
#if RATIO_TEMPERATURE_SCALES
VFOR (index_v)
temperature_scale_parameters[index_v] =
tmp_var_db1 * F_EXP (-(tmp_var_db2) / *xnumber_parameters)
* OPTIONS->User_Temperature_Ratio[index_v];
#else
VFOR (index_v)
temperature_scale_parameters[index_v] =
tmp_var_db1 * F_EXP (-(tmp_var_db2) / *xnumber_parameters);
#endif /* RATIO_TEMPERATURE_SCALES */
#endif /* QUENCH_PARAMETERS */
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Scale =
#endif
*temperature_scale_cost =
#if QUENCH_COST
#if QUENCH_COST_SCALE
tmp_var_db1 * F_EXP (-(tmp_var_db2 * OPTIONS->User_Quench_Cost_Scale[0])
#else
tmp_var_db1 * F_EXP (-(tmp_var_db2)
#endif
/ *xnumber_parameters) *
OPTIONS->Cost_Parameter_Scale_Ratio;
#else /* QUENCH_COST */
tmp_var_db1 * F_EXP (-(tmp_var_db2)
/ *xnumber_parameters) *
OPTIONS->Cost_Parameter_Scale_Ratio;
#endif /* QUENCH_COST */
/* CALCULATE NEW TEMPERATURES */
/* calculate new parameter temperatures */
VFOR (index_v) {
/* skip parameters with too small a range */
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
log_new_temperature_ratio =
-temperature_scale_parameters[index_v] *
F_POW ((double) index_parameter_generations[index_v],
#if QUENCH_PARAMETERS
OPTIONS->User_Quench_Param_Scale[index_v]
#else /* QUENCH_PARAMETERS */
ONE
#endif /* QUENCH_PARAMETERS */
/ *xnumber_parameters);
/* check (and correct) for too large an exponent */
log_new_temperature_ratio = EXPONENT_CHECK (log_new_temperature_ratio);
current_user_parameter_temp[index_v] =
initial_user_parameter_temp[index_v]
* F_EXP (log_new_temperature_ratio);
#if NO_PARAM_TEMP_TEST
if (current_user_parameter_temp[index_v] < (double) EPS_DOUBLE)
current_user_parameter_temp[index_v] = (double) EPS_DOUBLE;
#else
/* check for too small a parameter temperature */
if (current_user_parameter_temp[index_v] < (double) EPS_DOUBLE) {
*exit_status = P_TEMP_TOO_SMALL;
*index_exit_v = index_v;
goto EXIT_asa;
}
#endif
}
/* calculate new cost temperature */
log_new_temperature_ratio =
-*temperature_scale_cost * F_POW ((double) *index_cost_acceptances,
#if QUENCH_COST
OPTIONS->User_Quench_Cost_Scale[0]
#else
ONE
#endif
/ *xnumber_parameters);
log_new_temperature_ratio = EXPONENT_CHECK (log_new_temperature_ratio);
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Curr = OPTIONS->Cost_Temp_Init =
#endif
*current_cost_temperature = *initial_cost_temperature
* F_EXP (log_new_temperature_ratio);
#if NO_COST_TEMP_TEST
if (*current_cost_temperature < (double) EPS_DOUBLE)
#if USER_ACCEPTANCE_TEST
OPTIONS->Cost_Temp_Curr =
#endif
*current_cost_temperature = (double) EPS_DOUBLE;
#else
/* check for too small a cost temperature */
if (*current_cost_temperature < (double) EPS_DOUBLE) {
*exit_status = C_TEMP_TOO_SMALL;
goto EXIT_asa;
}
#endif
#if ASA_SAVE
if (asa_read == TRUE && OPTIONS->Asa_Recursive_Level == asa_recursive_max) {
if (OPTIONS->Asa_Recursive_Level > 0)
sprintf (asa_save_comm, "asa_save_%d", OPTIONS->Asa_Recursive_Level);
else
sprintf (asa_save_comm, "asa_save");
ptr_save = fopen (asa_save_comm, "r");
fread (number_parameters, sizeof (ALLOC_INT), 1, ptr_save);
fread (xnumber_parameters, sizeof (double), 1, ptr_save);
fread (parameter_minimum, sizeof (double),
*number_parameters, ptr_save);
fread (parameter_maximum, sizeof (double),
*number_parameters, ptr_save);
fread (tangents, sizeof (double), *number_parameters, ptr_save);
fread (current_user_parameter_temp, sizeof (double),
*number_parameters, ptr_save);
fread (initial_user_parameter_temp, sizeof (double),
*number_parameters, ptr_save);
fread (temperature_scale_parameters, sizeof (double),
*number_parameters, ptr_save);
fread (parameter_type, sizeof (int), *number_parameters, ptr_save);
fread (&index_cost_repeat, sizeof (int), 1, ptr_save);
fread (&asa_open, sizeof (int), 1, ptr_save);
fread (&number_asa_open, sizeof (int), 1, ptr_save);
fread (&recursive_asa_open, sizeof (int), 1, ptr_save);
fread (current_cost_temperature, sizeof (double), 1, ptr_save);
fread (initial_cost_temperature, sizeof (double), 1, ptr_save);
fread (temperature_scale_cost, sizeof (double), 1, ptr_save);
fread (accepted_to_generated_ratio, sizeof (double), 1, ptr_save);
fread (curvature_flag, sizeof (int), 1, ptr_save);
fread (seed, sizeof (LONG_INT), 1, ptr_save);
fread (number_generated, sizeof (LONG_INT), 1, ptr_save);
fread (number_accepted, sizeof (LONG_INT), 1, ptr_save);
fread (number_acceptances_saved, sizeof (LONG_INT), 1, ptr_save);
fread (recent_number_acceptances, sizeof (LONG_INT), 1, ptr_save);
fread (recent_number_generated, sizeof (LONG_INT), 1, ptr_save);
fread (number_invalid_generated_states, sizeof (LONG_INT), 1, ptr_save);
fread (index_cost_acceptances, sizeof (LONG_INT), 1, ptr_save);
fread (best_number_generated_saved, sizeof (LONG_INT), 1, ptr_save);
fread (best_number_accepted_saved, sizeof (LONG_INT), 1, ptr_save);
fread (index_parameter_generations, sizeof (LONG_INT),
*number_parameters, ptr_save);
fread (current_generated_state->parameter,
sizeof (double), *number_parameters, ptr_save);
fread (last_saved_state->parameter,
sizeof (double), *number_parameters, ptr_save);
fread (best_generated_state->parameter,
sizeof (double), *number_parameters, ptr_save);
fread (&(current_generated_state->cost), sizeof (double), 1, ptr_save);
fread (&(last_saved_state->cost), sizeof (double), 1, ptr_save);
fread (&(best_generated_state->cost), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Limit_Acceptances), sizeof (LONG_INT), 1, ptr_save);
fread (&(OPTIONS->Limit_Generated), sizeof (LONG_INT), 1, ptr_save);
fread (&(OPTIONS->Limit_Invalid_Generated_States), sizeof (int),
1, ptr_save);
fread (&(OPTIONS->Accepted_To_Generated_Ratio), sizeof (double),
1, ptr_save);
fread (&(OPTIONS->Cost_Precision), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Maximum_Cost_Repeat), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Number_Cost_Samples), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Temperature_Ratio_Scale), sizeof (double),
1, ptr_save);
fread (&(OPTIONS->Cost_Parameter_Scale_Ratio), sizeof (double),
1, ptr_save);
fread (&(OPTIONS->Temperature_Anneal_Scale), sizeof (double),
1, ptr_save);
fread (&(OPTIONS->Include_Integer_Parameters), sizeof (int),
1, ptr_save);
fread (&(OPTIONS->User_Initial_Parameters), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Sequential_Parameters), sizeof (ALLOC_INT), 1,
ptr_save);
fread (&(OPTIONS->Initial_Parameter_Temperature), sizeof (double), 1,
ptr_save);
fread (&(OPTIONS->Acceptance_Frequency_Modulus), sizeof (int), 1,
ptr_save);
fread (&(OPTIONS->Generated_Frequency_Modulus), sizeof (int), 1,
ptr_save);
fread (&(OPTIONS->Reanneal_Cost), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Reanneal_Parameters), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Delta_X), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->User_Tangents), sizeof (int), 1, ptr_save);
#if USER_INITIAL_COST_TEMP
fread (&(OPTIONS->User_Cost_Temperature), sizeof (double), 1, ptr_save);
#endif
#if RATIO_TEMPERATURE_SCALES
fread (OPTIONS->User_Temperature_Ratio, sizeof (double),
*number_parameters, ptr_save);
#endif
#if USER_INITIAL_PARAMETERS_TEMPS
fread (OPTIONS->User_Parameter_Temperature, sizeof (double),
*number_parameters, ptr_save);
#endif
#if DELTA_PARAMETERS
fread (OPTIONS->User_Delta_Parameter, sizeof (double),
*number_parameters, ptr_save);
#endif
#if QUENCH_PARAMETERS
fread (OPTIONS->User_Quench_Param_Scale, sizeof (double),
*number_parameters, ptr_save);
#endif
#if QUENCH_COST
fread (OPTIONS->User_Quench_Cost_Scale, sizeof (double), 1, ptr_save);
#endif
fread (&(OPTIONS->N_Accepted), sizeof (LONG_INT), 1, ptr_save);
fread (&(OPTIONS->N_Generated), sizeof (LONG_INT), 1, ptr_save);
fread (&(OPTIONS->Locate_Cost), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Immediate_Exit), sizeof (int), 1, ptr_save);
#if OPTIONAL_DATA_DBL
fread (&(OPTIONS->Asa_Data_Dim_Dbl), sizeof (ALLOC_INT), 1, ptr_save);
fread (OPTIONS->Asa_Data_Dbl, sizeof (double),
OPTIONS->Asa_Data_Dim_Dbl, ptr_save);
#endif
fread (&(OPTIONS->Random_Array_Dim), sizeof (ALLOC_INT), 1, ptr_save);
fread (OPTIONS->Random_Array, sizeof (double),
OPTIONS->Random_Array_Dim, ptr_save);
fread (&(OPTIONS->Asa_Recursive_Level), sizeof (int), 1, ptr_save);
#if OPTIONAL_DATA_INT
fread (&(OPTIONS->Asa_Data_Dim_Int), sizeof (ALLOC_INT), 1, ptr_save);
fread (OPTIONS->Asa_Data_Int, sizeof (LONG_INT),
OPTIONS->Asa_Data_Dim_Int, ptr_save);
#endif
#if OPTIONAL_DATA_PTR
fread (&(OPTIONS->Asa_Data_Dim_Ptr), sizeof (ALLOC_INT), 1, ptr_save);
if (OPTIONS->Asa_Recursive_Level == 0)
fread (OPTIONS->Asa_Data_Ptr, sizeof (OPTIONAL_PTR_TYPE),
OPTIONS->Asa_Data_Dim_Ptr, ptr_save);
#if ASA_TEMPLATE_SELFOPT
if (OPTIONS->Asa_Recursive_Level == 1)
fread (OPTIONS->Asa_Data_Ptr, sizeof (RECUR_OPTIONAL_PTR_TYPE),
OPTIONS->Asa_Data_Dim_Ptr, ptr_save);
#endif
#endif
#if USER_ASA_OUT
fread (OPTIONS->Asa_Out_File, sizeof (char), 80, ptr_save);
#endif
#if USER_ASA_USR_OUT
fread (OPTIONS->Asa_Usr_Out_File, sizeof (char), 80, ptr_save);
#endif
#if USER_COST_SCHEDULE
fread (&(OPTIONS->Cost_Schedule), sizeof (char), 1, ptr_save);
#endif
#if USER_ACCEPT_ASYMP_EXP
fread (&(OPTIONS->Asymp_Exp_Param), sizeof (double), 1, ptr_save);
#endif
#if USER_ACCEPTANCE_TEST
fread (&(OPTIONS->Acceptance_Test), sizeof (char), 1, ptr_save);
fread (&(OPTIONS->User_Acceptance_Flag), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Cost_Acceptance_Flag), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Cost_Temp_Curr), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Cost_Temp_Init), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Cost_Temp_Scale), sizeof (double), 1, ptr_save);
#endif
#if USER_GENERATING_FUNCTION
fread (&(OPTIONS->Generating_Distrib), sizeof (char), 1, ptr_save);
#endif
#if USER_REANNEAL_COST
fread (&(OPTIONS->Reanneal_Cost_Function), sizeof (char), 1, ptr_save);
#endif
#if USER_REANNEAL_PARAMETERS
fread (&(OPTIONS->Reanneal_Params_Function), sizeof (char),
1, ptr_save);
#endif
#if ASA_SAMPLE
fread (&(OPTIONS->Bias_Acceptance), sizeof (double), 1, ptr_save);
fread (OPTIONS->Bias_Generated, sizeof (double),
*number_parameters, ptr_save);
fread (&(OPTIONS->Average_Weights), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Limit_Weights), sizeof (double), 1, ptr_save);
#endif
#if ASA_QUEUE
fread (&save_queue, sizeof (LONG_INT), 1, ptr_save);
fread (&save_queue_indx, sizeof (LONG_INT), 1, ptr_save);
fread (&(OPTIONS->Queue_Size), sizeof (ALLOC_INT), 1, ptr_save);
fread (save_queue_flag, sizeof (int), save_queue, ptr_save);
fread (save_queue_cost, sizeof (double), save_queue, ptr_save);
fread (save_queue_param, sizeof (double),
(*number_parameters) * (OPTIONS->Queue_Size), ptr_save);
#if ASA_RESOLUTION
#else
fread (OPTIONS->Queue_Resolution, sizeof (double),
*number_parameters, ptr_save);
#endif
#endif /* ASA_QUEUE */
#if ASA_RESOLUTION
fread (OPTIONS->Coarse_Resolution, sizeof (double),
*number_parameters, ptr_save);
#endif
#if FITLOC
fread (&(OPTIONS->Fit_Local), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Iter_Max), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Penalty), sizeof (double), 1, ptr_save);
#endif
#if ASA_FUZZY
fread (&(OPTIONS->NoOfSamples), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->ThresholdDeviation), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Performance_Target), sizeof (double), 1, ptr_save);
fread (&(OPTIONS->Factor_a), sizeof (double), 1, ptr_save);
#endif
#if MULTI_MIN
fread (OPTIONS->Multi_Number, sizeof (int), 1, ptr_save);
fread (OPTIONS->Multi_Grid,
sizeof (double), *number_parameters, ptr_save);
fread (&(OPTIONS->Multi_Specify), sizeof (int), 1, ptr_save);
for (multi_index = 0; multi_index < OPTIONS->Multi_Number;
++multi_index) {
fread (&(OPTIONS->Multi_Cost[multi_index]), sizeof (double), 1,
ptr_save);
fread (&(OPTIONS->Multi_Params[multi_index]), sizeof (double),
*number_parameters, ptr_save);
}
#endif
#if ASA_PARALLEL
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
fread (gener_block_state[i_prll].parameter,
sizeof (double), *number_parameters, ptr_save);
fread (&(gener_block_state[i_prll].cost),
sizeof (double), 1, ptr_save);
#if USER_ACCEPTANCE_TEST
fread (&
(gener_block_state[i_prll].par_user_accept_flag),
sizeof (int), 1, ptr_save);
fread (&
(gener_block_state[i_prll].par_cost_accept_flag),
sizeof (int), 1, ptr_save);
#endif
}
fread (&(OPTIONS->Gener_Mov_Avr), sizeof (int), 1, ptr_save);
fread (&(OPTIONS->Gener_Block), sizeof (LONG_INT), 1, ptr_save);
fread (&(OPTIONS->Gener_Block_Max), sizeof (LONG_INT), 1, ptr_save);
#endif /* ASA_PARALLEL */
fclose (ptr_save);
asa_read = FALSE;
#if ASA_PRINT
print_state (parameter_minimum,
parameter_maximum,
tangents,
curvature,
current_cost_temperature,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
curvature_flag,
number_accepted,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
last_saved_state,
best_generated_state, ptr_asa_out, OPTIONS);
#endif /* ASA_PRINT */
#include "asa_opt"
#if ASA_SAVE_OPT
if ((ptr_save_opt = fopen ("asa_save_opt", "r")) == NULL) {
#if INCL_STDOUT
printf ("\n\n*** WARNING fopen asa_save_opt failed *** \n\n");
#endif /* INCL_STDOUT */
#if ASA_PRINT
fprintf (ptr_asa_out,
"\n\n*** WARNING fopen asa_save_opt failed *** \n\n");
fflush (ptr_asa_out);
#endif
} else {
fscanf_ret = fscanf (ptr_save_opt, "%s%s%s%s%s",
read_if, read_FALSE, read_comm1, read_ASA_SAVE,
read_comm2);
if (strcmp (read_if, "#if") || strcmp (read_FALSE, "FALSE")
|| strcmp (read_comm1, "/*")
|| strcmp (read_ASA_SAVE, "ASA_SAVE")
|| strcmp (read_comm2, "*/")) {
#if INCL_STDOUT
printf ("\n\n*** EXIT not asa_save_opt for this version *** \n\n");
#endif /* INCL_STDOUT */
#if ASA_PRINT
fprintf (ptr_asa_out,
"\n\n*** not asa_save_opt for this version *** \n\n");
fflush (ptr_asa_out);
#endif
*exit_status = INVALID_USER_INPUT;
goto EXIT_asa;
}
#if INT_LONG
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%ld", &read_long);
OPTIONS->Limit_Acceptances = read_long;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%ld", &read_long);
OPTIONS->Limit_Generated = read_long;
#else
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Limit_Acceptances = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Limit_Generated = read_int;
#endif
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Limit_Invalid_Generated_States = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Accepted_To_Generated_Ratio = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Cost_Precision = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Maximum_Cost_Repeat = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Number_Cost_Samples = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Temperature_Ratio_Scale = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Cost_Parameter_Scale_Ratio = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Temperature_Anneal_Scale = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Include_Integer_Parameters = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->User_Initial_Parameters = read_int;
#if INT_ALLOC
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Sequential_Parameters = read_int;
#else
#if INT_LONG
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%ld", &read_long);
OPTIONS->Sequential_Parameters = read_long;
#else
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Sequential_Parameters = read_int;
#endif
#endif
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Initial_Parameter_Temperature = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Acceptance_Frequency_Modulus = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Generated_Frequency_Modulus = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Reanneal_Cost = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Reanneal_Parameters = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%lf", &read_double);
OPTIONS->Delta_X = read_double;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->User_Tangents = read_int;
fscanf_ret = fscanf (ptr_save_opt, "%s", read_option);
fscanf_ret = fscanf (ptr_save_opt, "%d", &read_int);
OPTIONS->Curvature_0 = read_int;
fclose (ptr_save_opt);
}
#endif /* ASA_SAVE_OPT */
goto SAVED_asa;
}
#endif /* ASA_SAVE */
if (OPTIONS->Locate_Cost < 0) {
OPTIONS->Locate_Cost = 12; /* generate new state from new best */
} else {
OPTIONS->Locate_Cost = 2; /* generate new state */
}
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = FALSE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
#if ASA_EXIT_ANYTIME
if ((ptr_exit_anytime = fopen ("asa_exit_anytime", "r")) == NULL) {
goto EXIT_asa;
} else {
fclose (ptr_exit_anytime);
}
#endif /* ASA_EXIT_ANYTIME */
/* GENERATE NEW PARAMETERS */
/* generate a new valid set of parameters */
#if ASA_PARALLEL
/* While this section of code is set to run under OpenMP using the gcc
* compiler, you may change/add lines of code in this entire ASA_PARALLEL
* section to correspond to your choice of parallel algorithm and
* compiler. The entire ASA_PARALLEL section makes assignments to indexed
* variables to afford flexibility for other such algorithms. */
/* Note that here the do loop around generated states that tests for
* invalid states is taken over, not within, blocks of parallel
* calculated cost functions for these generated states, below. */
repeated_invalid_states = 0;
do {
valid_state_generated_flag_par_test = 0;
for (i_prll = 0; i_prll < OPTIONS->Gener_Block; ++i_prll) {
valid_state_generated_flag_par[i_prll] = TRUE;
number_invalid_generated_states_par[i_prll] =
*number_invalid_generated_states;
#if USER_ACCEPTANCE_TEST
gener_block_state[i_prll].par_user_accept_flag =
OPTIONS->User_Acceptance_Flag;
gener_block_state[i_prll].par_cost_accept_flag =
OPTIONS->Cost_Acceptance_Flag;
#endif
}
for (i_prll = 0; i_prll < OPTIONS->Gener_Block; ++i_prll) {
generate_flg_par[i_prll] =
generate_new_state_par (user_random_generator, seed,
parameter_minimum, parameter_maximum,
current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp,
temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
i_prll,
gener_block_state, last_saved_state,
OPTIONS);
#if ASA_QUEUE
/* Binary trees do not seem necessary since we are assuming
that the cost function calculation is the bottleneck.
However, see the MISC.DIR/asa_contrib file for
source code for doubly-linked and hashed lists. */
save_queue = (LONG_INT) OPTIONS->Queue_Size;
if (OPTIONS->Queue_Size == 0) {
queue_new_par[i_prll] = 1;
} else {
queue_new_par[i_prll] = 1;
for (queue = 0; queue < save_queue; ++queue) {
save_queue_test_par[i_prll] = 0;
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
++(save_queue_test_par[i_prll]);
} else {
queue_v_par[i_prll] =
index_v + queue * (LONG_INT) (*number_parameters);
tmp_var_db_par[i_prll] =
fabs (gener_block_state[i_prll].parameter
[index_v] -
save_queue_param_par[i_prll][queue_v_par[i_prll]]);
if (
#if ASA_RESOLUTION
/* Coarse_Resolution used in gener_block_state */
tmp_var_db_par[i_prll]
< EPS_DOUBLE
#else
tmp_var_db_par[i_prll]
< (OPTIONS->Queue_Resolution[index_v] + EPS_DOUBLE)
#endif /* ASA_RESOLUTION */
) {
++(save_queue_test_par[i_prll]);
}
}
}
if (save_queue_test_par[i_prll] == *number_parameters) {
queue_par_cost[i_prll] = save_queue_cost_par[i_prll][queue];
queue_new_par[i_prll] = 0;
valid_state_generated_flag_par[i_prll] =
save_queue_valid_state_flag_par[i_prll][queue];
if (valid_state_generated_flag_par[i_prll] == FALSE) {
#if ASA_PRINT_MORE
#if INT_LONG
fprintf (ptr_asa_out,
"ASA_QUEUE: %ld BlockID: %ld \t previous invalid state",
OPTIONS->N_Generated, i_prll);
#else
fprintf (ptr_asa_out,
"ASA_QUEUE: %d BlockID: %d \t previous invalid state",
OPTIONS->N_Generated, i_prll);
#endif
#endif /* ASA_PRINT_MORE */
} else if (valid_state_generated_flag_par[i_prll] == TRUE) {
#if ASA_PRINT_MORE
#if INT_LONG
fprintf (ptr_asa_out, "ASA_QUEUE: %ld BlockID %ld \t %*.*g\n",
OPTIONS->N_Generated, i_prll,
G_FIELD, G_PRECISION, tmp_var_db_par[i_prll]);
#else
fprintf (ptr_asa_out, "ASA_QUEUE: BlockID %d %d \t %*.*g\n",
OPTIONS->N_Generated, i_prll,
G_FIELD, G_PRECISION, tmp_var_db_par[i_prll]);
#endif
#endif /* ASA_PRINT_MORE */
}
break;
}
}
}
#endif /* ASA_QUEUE */
}
/* *** ENTER CODE TO SPAWN OFF PARALLEL GENERATED STATES *** */
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
for (i_prll = 0; i_prll < OPTIONS->Gener_Block; ++i_prll) {
#if ASA_QUEUE
if (queue_new_par[i_prll] == 0) {
gener_block_state[i_prll].cost = queue_par_cost[i_prll];
} else {
#endif /* ASA_QUEUE */
OPTIONS->parallel_id = i_prll;
gener_block_state[i_prll].cost =
user_cost_function (gener_block_state[i_prll].parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
&(valid_state_generated_flag_par[i_prll]),
exit_status, OPTIONS);
tmp_var_db1_par[i_prll] =
cost_function_test (gener_block_state[i_prll].cost,
gener_block_state[i_prll].parameter,
parameter_minimum, parameter_maximum,
number_parameters, xnumber_parameters);
if (tmp_var_db1_par[i_prll] == 0) {
EXIT_asa_parallel = 1;
}
#if ASA_QUEUE
}
#endif /* ASA_QUEUE */
}
/* *** EXIT CODE SPAWNING OFF PARALLEL GENERATED STATES *** */
if (EXIT_asa_parallel == 1) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
#if ASA_QUEUE
for (i_prll = 0; i_prll < OPTIONS->Gener_Block; ++i_prll) {
if (valid_state_generated_flag_par[i_prll] == FALSE) {
++valid_state_generated_flag_par_test;
}
if (queue_new_par[i_prll] == 1) {
if (OPTIONS->Queue_Size > 0) { /* in case recursive use */
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
continue;
}
queue_v_par[i_prll] = index_v + save_queue_indx_par[i_prll]
* (LONG_INT) (*number_parameters);
save_queue_param_par[i_prll][queue_v_par[i_prll]] =
gener_block_state[i_prll].parameter[index_v];
}
save_queue_cost_par[i_prll][save_queue_indx_par[i_prll]] =
gener_block_state[i_prll].cost;
save_queue_valid_state_flag_par[i_prll][save_queue_indx_par
[i_prll]]
= valid_state_generated_flag_par[i_prll];
++(save_queue_par[i_prll]);
if (save_queue_par[i_prll] == (LONG_INT) OPTIONS->Queue_Size)
--(save_queue_par[i_prll]);
++(save_queue_indx_par[i_prll]);
if (save_queue_indx_par[i_prll] == (LONG_INT) OPTIONS->Queue_Size)
save_queue_indx_par[i_prll] = 0;
}
}
}
#endif /* ASA_QUEUE */
repeated_invalid_states += valid_state_generated_flag_par_test;
}
while (valid_state_generated_flag_par_test >= OPTIONS->Gener_Block);
if (repeated_invalid_states > OPTIONS->Limit_Invalid_Generated_States) {
*exit_status = TOO_MANY_INVALID_STATES;
goto EXIT_asa;
}
#else /* ASA_PARALLEL */
repeated_invalid_states = 0;
do {
++(*number_invalid_generated_states);
generate_flg = generate_new_state (user_random_generator,
seed,
parameter_minimum,
parameter_maximum,
current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp,
temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
current_generated_state,
last_saved_state, OPTIONS);
*valid_state_generated_flag = TRUE;
#if ASA_QUEUE
/* Binary trees do not seem necessary since we are assuming
that the cost function calculation is the bottleneck.
However, see the MISC.DIR/asa_contrib file for
source code for doubly-linked and hashed lists. */
save_queue = (LONG_INT) OPTIONS->Queue_Size;
if (OPTIONS->Queue_Size == 0) {
queue_new = 1;
} else {
queue_new = 1;
for (queue = 0; queue < save_queue; ++queue) {
save_queue_test = 0;
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
++save_queue_test;
} else {
queue_v = index_v + queue * (LONG_INT) (*number_parameters);
if (
#if ASA_RESOLUTION
/* Coarse_Resolution used in current_generated_state */
fabs (current_generated_state->parameter[index_v] -
save_queue_param[queue_v]) < EPS_DOUBLE
#else
fabs (current_generated_state->parameter[index_v] -
save_queue_param[queue_v]) <
(OPTIONS->Queue_Resolution[index_v] + EPS_DOUBLE)
#endif /* ASA_RESOLUTION */
) {
++save_queue_test;
}
}
}
if (save_queue_test == *number_parameters) {
tmp_var_db = save_queue_cost[queue];
queue_new = 0;
*valid_state_generated_flag = save_queue_flag[queue];
if (*valid_state_generated_flag == FALSE) {
#if ASA_PRINT_MORE
#if INT_LONG
fprintf (ptr_asa_out,
"ASA_QUEUE: %ld \t previous invalid state",
OPTIONS->N_Generated);
#else
fprintf (ptr_asa_out,
"ASA_QUEUE: %d \t previous invalid state",
OPTIONS->N_Generated);
#endif
#endif /* ASA_PRINT_MORE */
} else {
#if ASA_PRINT_MORE
#if INT_LONG
fprintf (ptr_asa_out, "ASA_QUEUE: %ld \t %*.*g\n",
OPTIONS->N_Generated,
G_FIELD, G_PRECISION, tmp_var_db);
#else
fprintf (ptr_asa_out, "ASA_QUEUE: %d \t %*.*g\n",
OPTIONS->N_Generated,
G_FIELD, G_PRECISION, tmp_var_db);
#endif
#endif /* ASA_PRINT_MORE */
}
break;
}
}
}
if (queue_new == 1) {
tmp_var_db =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if (cost_function_test (tmp_var_db,
current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
number_parameters, xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
if (OPTIONS->Queue_Size > 0) { /* in case recursive use */
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
continue;
}
queue_v = index_v + save_queue_indx
* (LONG_INT) (*number_parameters);
save_queue_param[queue_v] =
current_generated_state->parameter[index_v];
}
save_queue_cost[save_queue_indx] = tmp_var_db;
save_queue_flag[save_queue_indx]
= *valid_state_generated_flag;
++save_queue;
if (save_queue == (LONG_INT) OPTIONS->Queue_Size)
--save_queue;
++save_queue_indx;
if (save_queue_indx == (LONG_INT) OPTIONS->Queue_Size)
save_queue_indx = 0;
}
}
#else /* ASA_QUEUE */
tmp_var_db =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if (cost_function_test
(tmp_var_db, current_generated_state->parameter,
parameter_minimum, parameter_maximum, number_parameters,
xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
#endif /* ASA_QUEUE */
current_generated_state->cost = tmp_var_db;
++repeated_invalid_states;
if (repeated_invalid_states > OPTIONS->Limit_Invalid_Generated_States) {
*exit_status = TOO_MANY_INVALID_STATES;
goto EXIT_asa;
}
}
while (*valid_state_generated_flag == FALSE);
--(*number_invalid_generated_states);
#endif /* ASA_PARALLEL */
/* ACCEPT/REJECT NEW PARAMETERS */
#if ASA_PARALLEL
for (sort_index = 0; sort_index < OPTIONS->Gener_Block; ++sort_index) {
parallel_sort[sort_index] = sort_index;
}
qsort (parallel_sort, OPTIONS->Gener_Block, sizeof (LONG_INT),
sort_parallel);
for (sort_index = 0; sort_index < OPTIONS->Gener_Block; ++sort_index) {
i_prll = parallel_sort[sort_index];
if (valid_state_generated_flag_par[i_prll] == FALSE) {
continue;
}
current_generated_state->cost = gener_block_state[i_prll].cost;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag =
gener_block_state[i_prll].par_user_accept_flag;
OPTIONS->Cost_Acceptance_Flag =
gener_block_state[i_prll].par_cost_accept_flag;
#endif
VFOR (index_v) {
/* ignore parameters with too small a range */
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
current_generated_state->parameter[index_v] =
gener_block_state[i_prll].parameter[index_v];
}
#endif /* ASA_PARALLEL */
/* decide whether to accept/reject the new state */
accept_new_state (user_random_generator,
seed,
parameter_minimum,
parameter_maximum, current_cost_temperature,
#if ASA_SAMPLE
current_user_parameter_temp,
#endif
number_parameters,
recent_number_acceptances,
number_accepted,
index_cost_acceptances,
number_acceptances_saved,
recent_number_generated,
number_generated,
index_parameter_generations,
current_generated_state, last_saved_state,
#if ASA_SAMPLE
ptr_asa_out,
#endif
OPTIONS);
#if ASA_PARALLEL
#else
#if ASA_PIPE_FILE
#if INT_ALLOC
fprintf (ptr_asa_pipe, "%d", *number_generated);
#else
#if INT_LONG
fprintf (ptr_asa_pipe, "%ld", *number_generated);
#else
fprintf (ptr_asa_pipe, "%d", *number_generated);
#endif
#endif
#if INT_ALLOC
fprintf (ptr_asa_pipe, "\t%d", *number_accepted);
#else
#if INT_LONG
fprintf (ptr_asa_pipe, "\t%ld", *number_accepted);
#else
fprintf (ptr_asa_pipe, "\t%d", *number_accepted);
#endif
#endif
fprintf (ptr_asa_pipe, "\t%g", best_generated_state->cost);
VFOR (index_v)
fprintf (ptr_asa_pipe, "\t%g",
best_generated_state->parameter[index_v]);
fprintf (ptr_asa_pipe, "\t%g", current_generated_state->cost);
VFOR (index_v)
fprintf (ptr_asa_pipe, "\t%g",
current_generated_state->parameter[index_v]);
fprintf (ptr_asa_pipe, "\t%g", *current_cost_temperature);
VFOR (index_v)
fprintf (ptr_asa_pipe, "\t%g", current_user_parameter_temp[index_v]);
fprintf (ptr_asa_pipe, "\t%g", last_saved_state->cost);
fprintf (ptr_asa_pipe, "\n");
fflush (ptr_asa_pipe);
#endif /* ASA_PIPE_FILE */
#if INCL_STDOUT
#if ASA_PIPE
#if INT_ALLOC
printf ("%d", *number_generated);
#else
#if INT_LONG
printf ("%ld", *number_generated);
#else
printf ("%d", *number_generated);
#endif
#endif
#if INT_ALLOC
printf ("\t%d", *number_accepted);
#else
#if INT_LONG
printf ("\t%ld", *number_accepted);
#else
printf ("\t%d", *number_accepted);
#endif
#endif
printf ("\t%g", best_generated_state->cost);
VFOR (index_v)
printf ("\t%g", best_generated_state->parameter[index_v]);
printf ("\t%g", current_generated_state->cost);
VFOR (index_v)
printf ("\t%g", current_generated_state->parameter[index_v]);
printf ("\t%g", *current_cost_temperature);
VFOR (index_v)
printf ("\t%g", current_user_parameter_temp[index_v]);
printf ("\t%g", last_saved_state->cost);
printf ("\n");
#endif /* ASA_PIPE */
#endif /* INCL_STDOUT */
#endif /* ASA_PARALLEL */
/* calculate the ratio of acceptances to generated states */
*accepted_to_generated_ratio =
(double) (*recent_number_acceptances + 1) /
(double) (*recent_number_generated + 1);
#if MULTI_MIN
if (((OPTIONS->Multi_Specify == 0)
&& (current_generated_state->cost <= best_generated_state->cost))
|| ((OPTIONS->Multi_Specify == 1)
&& (current_generated_state->cost <
best_generated_state->cost))) {
#if ASA_RESOLUTION
VFOR (index_v) {
if (OPTIONS->Multi_Grid[index_v] <
OPTIONS->Coarse_Resolution[index_v])
OPTIONS->Multi_Grid[index_v] =
OPTIONS->Coarse_Resolution[index_v];
}
#endif /* ASA_RESOLUTION */
VFOR (index_v) {
if (OPTIONS->Multi_Grid[index_v] < EPS_DOUBLE)
OPTIONS->Multi_Grid[index_v] = EPS_DOUBLE;
}
multi_test = 0;
for (multi_index = 0; multi_index < OPTIONS->Multi_Number;
++multi_index) {
multi_test_cmp = 0;
multi_test_dim = 0;
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
++multi_test_dim;
if (fabs (current_generated_state->parameter[index_v]
- OPTIONS->Multi_Params[multi_index][index_v])
< OPTIONS->Multi_Grid[index_v] - EPS_DOUBLE)
++multi_test_cmp;
}
if (multi_test_cmp == multi_test_dim)
multi_test = 1;
if (OPTIONS->Multi_Specify == 1)
break;
}
if (multi_test == 0) {
multi_cost[OPTIONS->Multi_Number] = current_generated_state->cost;
VFOR (index_v) {
multi_params[OPTIONS->Multi_Number][index_v] =
current_generated_state->parameter[index_v];
}
for (multi_index = 0; multi_index < OPTIONS->Multi_Number;
++multi_index) {
multi_cost[multi_index] = OPTIONS->Multi_Cost[multi_index];
VFOR (index_v) {
multi_params[multi_index][index_v] =
OPTIONS->Multi_Params[multi_index][index_v];
}
}
qsort (multi_sort, OPTIONS->Multi_Number + 1, sizeof (int),
multi_compare);
for (multi_index = 0; multi_index < OPTIONS->Multi_Number;
++multi_index) {
OPTIONS->Multi_Cost[multi_index] =
multi_cost[multi_sort[multi_index]];
VFOR (index_v) {
OPTIONS->Multi_Params[multi_index][index_v] =
multi_params[multi_sort[multi_index]][index_v];
}
}
}
}
#endif /* MULTI_MIN */
/* CHECK FOR NEW MINIMUM */
if (current_generated_state->cost < best_generated_state->cost) {
best_flag = 1;
} else {
best_flag = 0;
}
#if MULTI_MIN
if (((OPTIONS->Multi_Specify == 0)
&& (current_generated_state->cost <= best_generated_state->cost))
|| ((OPTIONS->Multi_Specify == 1)
&& (current_generated_state->cost <
best_generated_state->cost)))
#else
if (current_generated_state->cost < best_generated_state->cost)
#endif /* MULTI_MIN */
{
/* NEW MINIMUM FOUND */
OPTIONS->Locate_Cost = -1;
/* reset the recent acceptances and generated counts */
*recent_number_acceptances = *recent_number_generated = 0;
if (best_flag == 1) {
*best_number_generated_saved = *number_generated;
*best_number_accepted_saved = *number_accepted;
}
index_cost_repeat = 0;
/* copy the current state into the best_generated state */
best_generated_state->cost = current_generated_state->cost;
VFOR (index_v) {
#if DROPPED_PARAMETERS
/* ignore parameters that have too small a range */
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
#endif
best_generated_state->parameter[index_v] =
current_generated_state->parameter[index_v];
}
/* printout the new minimum state and value */
#if ASA_PRINT
fprintf (ptr_asa_out,
#if INT_LONG
"best...->cost=%-*.*g *number_accepted=%ld *number_generated=%ld\n",
G_FIELD, G_PRECISION, best_generated_state->cost,
#else
"best...->cost=%-*.*g *number_accepted=%d *number_generated=%d\n",
G_FIELD, G_PRECISION, best_generated_state->cost,
#endif /* INT_LONG */
*number_accepted, *number_generated);
#if ASA_PRINT_MORE
if (best_flag == 1) {
fprintf (ptr_asa_out, "\nnew best\n");
}
#endif /* ASA_PRINT_MORE */
#if ASA_PARALLEL
/* print OPTIONS->Gener_Block just used */
fprintf (ptr_asa_out,
#if INT_LONG
"OPTIONS->Gener_Block = %ld\n",
#else
"OPTIONS->Gener_Block = %d\n",
#endif /* INT_LONG */
OPTIONS->Gener_Block);
#endif /* ASA_PARALLEL */
if (best_flag == 1) {
#if ASA_PRINT_MORE
#if INT_ALLOC
fprintf (ptr_asa_out, "Present Random Seed = %d\n", *seed);
#else
#if INT_LONG
fprintf (ptr_asa_out, "Present Random Seed = %ld\n", *seed);
#else
fprintf (ptr_asa_out, "Present Random Seed = %d\n", *seed);
#endif
#endif
print_state (parameter_minimum,
parameter_maximum,
tangents,
curvature,
current_cost_temperature,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
curvature_flag,
number_accepted,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
last_saved_state,
best_generated_state, ptr_asa_out, OPTIONS);
#endif /* ASA_PRINT_MORE */
}
fflush (ptr_asa_out);
#endif /* ASA_PRINT */
}
#if ASA_PARALLEL
}
#endif /* ASA_PARALLEL */
#if ASA_SAVE
/* These writes are put here with these tests, instead of just
after a new best state is found, to prevent any confusion with
any parallel code that might be added by users. */
if (*recent_number_acceptances == 0
&& *recent_number_generated == 0
&& *best_number_generated_saved == *number_generated
&& *best_number_accepted_saved == *number_accepted
&& OPTIONS->Asa_Recursive_Level == asa_recursive_max
&& index_cost_repeat == 0) {
if (OPTIONS->Asa_Recursive_Level > 0)
sprintf (asa_save_comm, "asa_save_%d", OPTIONS->Asa_Recursive_Level);
else
sprintf (asa_save_comm, "asa_save");
ptr_save = fopen (asa_save_comm, "w");
fwrite (number_parameters, sizeof (ALLOC_INT), 1, ptr_save);
fwrite (xnumber_parameters, sizeof (double), 1, ptr_save);
fwrite (parameter_minimum, sizeof (double), *number_parameters,
ptr_save);
fwrite (parameter_maximum, sizeof (double), *number_parameters,
ptr_save);
fwrite (tangents, sizeof (double), *number_parameters, ptr_save);
fwrite (current_user_parameter_temp, sizeof (double),
*number_parameters, ptr_save);
fwrite (initial_user_parameter_temp, sizeof (double),
*number_parameters, ptr_save);
fwrite (temperature_scale_parameters, sizeof (double),
*number_parameters, ptr_save);
fwrite (parameter_type, sizeof (int), *number_parameters, ptr_save);
fwrite (&index_cost_repeat, sizeof (int), 1, ptr_save);
fwrite (&asa_open, sizeof (int), 1, ptr_save);
fwrite (&number_asa_open, sizeof (int), 1, ptr_save);
fwrite (&recursive_asa_open, sizeof (int), 1, ptr_save);
fwrite (current_cost_temperature, sizeof (double), 1, ptr_save);
fwrite (initial_cost_temperature, sizeof (double), 1, ptr_save);
fwrite (temperature_scale_cost, sizeof (double), 1, ptr_save);
fwrite (accepted_to_generated_ratio, sizeof (double), 1, ptr_save);
fwrite (curvature_flag, sizeof (int), 1, ptr_save);
fwrite (seed, sizeof (LONG_INT), 1, ptr_save);
fwrite (number_generated, sizeof (LONG_INT), 1, ptr_save);
fwrite (number_accepted, sizeof (LONG_INT), 1, ptr_save);
fwrite (number_acceptances_saved, sizeof (LONG_INT), 1, ptr_save);
fwrite (recent_number_acceptances, sizeof (LONG_INT), 1, ptr_save);
fwrite (recent_number_generated, sizeof (LONG_INT), 1, ptr_save);
fwrite (number_invalid_generated_states, sizeof (LONG_INT), 1,
ptr_save);
fwrite (index_cost_acceptances, sizeof (LONG_INT), 1, ptr_save);
fwrite (best_number_generated_saved, sizeof (LONG_INT), 1, ptr_save);
fwrite (best_number_accepted_saved, sizeof (LONG_INT), 1, ptr_save);
fwrite (index_parameter_generations, sizeof (LONG_INT),
*number_parameters, ptr_save);
fwrite (current_generated_state->parameter,
sizeof (double), *number_parameters, ptr_save);
fwrite (last_saved_state->parameter,
sizeof (double), *number_parameters, ptr_save);
fwrite (best_generated_state->parameter,
sizeof (double), *number_parameters, ptr_save);
fwrite (&(current_generated_state->cost), sizeof (double), 1, ptr_save);
fwrite (&(last_saved_state->cost), sizeof (double), 1, ptr_save);
fwrite (&(best_generated_state->cost), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Limit_Acceptances), sizeof (LONG_INT), 1, ptr_save);
fwrite (&(OPTIONS->Limit_Generated), sizeof (LONG_INT), 1, ptr_save);
fwrite (&(OPTIONS->Limit_Invalid_Generated_States), sizeof (int),
1, ptr_save);
fwrite (&(OPTIONS->Accepted_To_Generated_Ratio), sizeof (double),
1, ptr_save);
fwrite (&(OPTIONS->Cost_Precision), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Maximum_Cost_Repeat), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Number_Cost_Samples), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Temperature_Ratio_Scale), sizeof (double),
1, ptr_save);
fwrite (&(OPTIONS->Cost_Parameter_Scale_Ratio), sizeof (double),
1, ptr_save);
fwrite (&(OPTIONS->Temperature_Anneal_Scale), sizeof (double),
1, ptr_save);
fwrite (&(OPTIONS->Include_Integer_Parameters), sizeof (int),
1, ptr_save);
fwrite (&(OPTIONS->User_Initial_Parameters), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Sequential_Parameters), sizeof (ALLOC_INT), 1,
ptr_save);
fwrite (&(OPTIONS->Initial_Parameter_Temperature), sizeof (double), 1,
ptr_save);
fwrite (&(OPTIONS->Acceptance_Frequency_Modulus), sizeof (int), 1,
ptr_save);
fwrite (&(OPTIONS->Generated_Frequency_Modulus), sizeof (int), 1,
ptr_save);
fwrite (&(OPTIONS->Reanneal_Cost), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Reanneal_Parameters), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Delta_X), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->User_Tangents), sizeof (int), 1, ptr_save);
#if USER_INITIAL_COST_TEMP
fwrite (&(OPTIONS->User_Cost_Temperature), sizeof (double), 1,
ptr_save);
#endif
#if RATIO_TEMPERATURE_SCALES
fwrite (OPTIONS->User_Temperature_Ratio, sizeof (double),
*number_parameters, ptr_save);
#endif
#if USER_INITIAL_PARAMETERS_TEMPS
fwrite (OPTIONS->User_Parameter_Temperature, sizeof (double),
*number_parameters, ptr_save);
#endif
#if DELTA_PARAMETERS
fwrite (OPTIONS->User_Delta_Parameter, sizeof (double),
*number_parameters, ptr_save);
#endif
#if QUENCH_PARAMETERS
fwrite (OPTIONS->User_Quench_Param_Scale, sizeof (double),
*number_parameters, ptr_save);
#endif
#if QUENCH_COST
fwrite (OPTIONS->User_Quench_Cost_Scale, sizeof (double), 1, ptr_save);
#endif
fwrite (&(OPTIONS->N_Accepted), sizeof (LONG_INT), 1, ptr_save);
fwrite (&(OPTIONS->N_Generated), sizeof (LONG_INT), 1, ptr_save);
fwrite (&(OPTIONS->Locate_Cost), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Immediate_Exit), sizeof (int), 1, ptr_save);
#if OPTIONAL_DATA_DBL
fwrite (&(OPTIONS->Asa_Data_Dim_Dbl), sizeof (ALLOC_INT), 1, ptr_save);
fwrite (OPTIONS->Asa_Data_Dbl, sizeof (double),
OPTIONS->Asa_Data_Dim_Dbl, ptr_save);
#endif
fwrite (&(OPTIONS->Random_Array_Dim), sizeof (ALLOC_INT), 1, ptr_save);
fwrite (OPTIONS->Random_Array, sizeof (double),
OPTIONS->Random_Array_Dim, ptr_save);
fwrite (&(OPTIONS->Asa_Recursive_Level), sizeof (int), 1, ptr_save);
#if OPTIONAL_DATA_INT
fwrite (&(OPTIONS->Asa_Data_Dim_Int), sizeof (ALLOC_INT), 1, ptr_save);
fwrite (OPTIONS->Asa_Data_Int, sizeof (LONG_INT),
OPTIONS->Asa_Data_Dim_Int, ptr_save);
#endif
#if OPTIONAL_DATA_PTR
fwrite (&(OPTIONS->Asa_Data_Dim_Ptr), sizeof (ALLOC_INT), 1, ptr_save);
if (OPTIONS->Asa_Recursive_Level == 0)
fwrite (OPTIONS->Asa_Data_Ptr, sizeof (OPTIONAL_PTR_TYPE),
OPTIONS->Asa_Data_Dim_Ptr, ptr_save);
#if ASA_TEMPLATE_SELFOPT
if (OPTIONS->Asa_Recursive_Level == 1)
fwrite (OPTIONS->Asa_Data_Ptr, sizeof (RECUR_OPTIONAL_PTR_TYPE),
OPTIONS->Asa_Data_Dim_Ptr, ptr_save);
#endif
#endif
#if USER_ASA_OUT
fwrite (OPTIONS->Asa_Out_File, sizeof (char), 80, ptr_save);
#endif
#if USER_ASA_OUT
fwrite (OPTIONS->Asa_Usr_Out_File, sizeof (char), 80, ptr_save);
#endif
#if USER_COST_SCHEDULE
fwrite (&(OPTIONS->Cost_Schedule), sizeof (char), 1, ptr_save);
#endif
#if USER_ACCEPT_ASYMP_EXP
fwrite (&(OPTIONS->Asymp_Exp_Param), sizeof (double), 1, ptr_save);
#endif
#if USER_ACCEPTANCE_TEST
fwrite (&(OPTIONS->Acceptance_Test), sizeof (char), 1, ptr_save);
fwrite (&(OPTIONS->User_Acceptance_Flag), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Cost_Acceptance_Flag), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Cost_Temp_Curr), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Cost_Temp_Init), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Cost_Temp_Scale), sizeof (double), 1, ptr_save);
#endif
#if USER_GENERATING_FUNCTION
fwrite (&(OPTIONS->Generating_Distrib), sizeof (char), 1, ptr_save);
#endif
#if USER_REANNEAL_COST
fwrite (&(OPTIONS->Reanneal_Cost_Function), sizeof (char), 1, ptr_save);
#endif
#if USER_REANNEAL_PARAMETERS
fwrite (&(OPTIONS->Reanneal_Params_Function), sizeof (char), 1,
ptr_save);
#endif
#if ASA_SAMPLE
fwrite (&(OPTIONS->Bias_Acceptance), sizeof (double), 1, ptr_save);
fwrite (OPTIONS->Bias_Generated, sizeof (double),
*number_parameters, ptr_save);
fwrite (&(OPTIONS->Average_Weights), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Limit_Weights), sizeof (double), 1, ptr_save);
#endif
#if ASA_QUEUE
fwrite (&save_queue, sizeof (LONG_INT), 1, ptr_save);
fwrite (&save_queue_indx, sizeof (LONG_INT), 1, ptr_save);
fwrite (&(OPTIONS->Queue_Size), sizeof (ALLOC_INT), 1, ptr_save);
fwrite (save_queue_flag, sizeof (int), save_queue, ptr_save);
fwrite (save_queue_cost, sizeof (double), save_queue, ptr_save);
fwrite (save_queue_param, sizeof (double),
(*number_parameters) * (OPTIONS->Queue_Size), ptr_save);
#if ASA_RESOLUTION
#else
fwrite (OPTIONS->Queue_Resolution, sizeof (double),
*number_parameters, ptr_save);
#endif
#endif /* ASA_QUEUE */
#if ASA_RESOLUTION
fwrite (OPTIONS->Coarse_Resolution, sizeof (double),
*number_parameters, ptr_save);
#endif
#if ASA_FUZZY
fwrite (&(OPTIONS->NoOfSamples), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->ThresholdDeviation), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Performance_Target), sizeof (double), 1, ptr_save);
fwrite (&(OPTIONS->Factor_a), sizeof (double), 1, ptr_save);
#endif
#if FITLOC
fwrite (&(OPTIONS->Fit_Local), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Iter_Max), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Penalty), sizeof (double), 1, ptr_save);
#endif
#if MULTI_MIN
fwrite (OPTIONS->Multi_Number, sizeof (int), 1, ptr_save);
fwrite (OPTIONS->Multi_Grid,
sizeof (double), *number_parameters, ptr_save);
fwrite (&(OPTIONS->Multi_Specify), sizeof (int), 1, ptr_save);
for (multi_index = 0; multi_index < OPTIONS->Multi_Number;
++multi_index) {
fwrite (&(OPTIONS->Multi_Cost[multi_index]), sizeof (double), 1,
ptr_save);
fwrite (&(OPTIONS->Multi_Params[multi_index]), sizeof (double),
*number_parameters, ptr_save);
}
#endif
#if ASA_PARALLEL
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
fwrite (gener_block_state[i_prll].parameter,
sizeof (double), *number_parameters, ptr_save);
fwrite (&(gener_block_state[i_prll].cost),
sizeof (double), 1, ptr_save);
#if USER_ACCEPTANCE_TEST
fwrite (&
(gener_block_state[i_prll].par_user_accept_flag),
sizeof (int), 1, ptr_save);
fwrite (&(gener_block_state[i_prll].par_cost_accept_flag),
sizeof (int), 1, ptr_save);
#endif
}
fwrite (&(OPTIONS->Gener_Mov_Avr), sizeof (int), 1, ptr_save);
fwrite (&(OPTIONS->Gener_Block), sizeof (LONG_INT), 1, ptr_save);
fwrite (&(OPTIONS->Gener_Block_Max), sizeof (LONG_INT), 1, ptr_save);
#endif /* ASA_PARALLEL */
fclose (ptr_save);
SAVED_asa:
;
#if SYSTEM_CALL
#if ASA_SAVE_BACKUP
#if INT_LONG
if (OPTIONS->Asa_Recursive_Level > 0)
sprintf (asa_save_comm, "/bin/cp asa_save_%d asa_save_%d.%ld",
OPTIONS->Asa_Recursive_Level,
OPTIONS->Asa_Recursive_Level, OPTIONS->N_Accepted);
else
sprintf (asa_save_comm, "/bin/cp asa_save asa_save.%ld",
OPTIONS->N_Accepted);
#else
if (OPTIONS->Asa_Recursive_Level > 0)
sprintf (asa_save_comm, "/bin/cp asa_save_%d asa_save_%d.%d",
OPTIONS->Asa_Recursive_Level,
OPTIONS->Asa_Recursive_Level, OPTIONS->N_Accepted);
else
sprintf (asa_save_comm, "/bin/cp asa_save asa_save.%d",
OPTIONS->N_Accepted);
#endif
ptr_comm = popen (asa_save_comm, "r");
pclose (ptr_comm);
#else /* ASA_SAVE_BACKUP */
/* extra protection in case run aborts during write */
if (OPTIONS->Asa_Recursive_Level > 0)
sprintf (asa_save_comm, "/bin/cp asa_save_%d asa_save_%d.old",
OPTIONS->Asa_Recursive_Level, OPTIONS->Asa_Recursive_Level);
else
sprintf (asa_save_comm, "/bin/cp asa_save asa_save.old");
ptr_comm = popen (asa_save_comm, "r");
pclose (ptr_comm);
#endif /* ASA_SAVE_BACKUP */
#endif /* SYSTEM_CALL */
}
#endif /* ASA_SAVE */
if (OPTIONS->Immediate_Exit == TRUE) {
*exit_status = IMMEDIATE_EXIT;
goto EXIT_asa;
}
/* PERIODIC TESTING/REANNEALING/PRINTING SECTION */
if (OPTIONS->Acceptance_Frequency_Modulus == 0)
tmp_var_int1 = FALSE;
else if ((int) (*number_accepted %
((LONG_INT) OPTIONS->Acceptance_Frequency_Modulus)) == 0
&& *number_acceptances_saved == *number_accepted)
tmp_var_int1 = TRUE;
else
tmp_var_int1 = FALSE;
if (OPTIONS->Generated_Frequency_Modulus == 0)
tmp_var_int2 = FALSE;
else if ((int) (*number_generated %
((LONG_INT) OPTIONS->Generated_Frequency_Modulus)) == 0)
tmp_var_int2 = TRUE;
else
tmp_var_int2 = FALSE;
if (tmp_var_int1 == TRUE || tmp_var_int2 == TRUE
|| (*accepted_to_generated_ratio
< OPTIONS->Accepted_To_Generated_Ratio)) {
#if ASA_PARALLEL
if (OPTIONS->Gener_Mov_Avr > 0) {
for (i_prll = 1; i_prll < OPTIONS->Gener_Mov_Avr; ++i_prll) {
parallel_gen_ratio_block[i_prll - 1] =
parallel_gen_ratio_block[i_prll];
}
parallel_gen_ratio_block[OPTIONS->Gener_Mov_Avr - 1] =
*recent_number_generated;
tmp_var_lint = 0;
for (i_prll = 0; i_prll < OPTIONS->Gener_Mov_Avr; ++i_prll) {
tmp_var_lint += parallel_gen_ratio_block[i_prll];
}
OPTIONS->Gener_Block = (LONG_INT)
((double) tmp_var_lint / (double) (OPTIONS->Gener_Mov_Avr));
OPTIONS->Gener_Block =
MIN (OPTIONS->Gener_Block, OPTIONS->Gener_Block_Max);
}
#endif /* ASA_PARALLEL */
if (*accepted_to_generated_ratio <
(OPTIONS->Accepted_To_Generated_Ratio))
*recent_number_acceptances = *recent_number_generated = 0;
/* if best.cost repeats OPTIONS->Maximum_Cost_Repeat then exit */
if (OPTIONS->Maximum_Cost_Repeat != 0) {
if (fabs (last_saved_state->cost - best_generated_state->cost)
< OPTIONS->Cost_Precision) {
++index_cost_repeat;
if (index_cost_repeat == (OPTIONS->Maximum_Cost_Repeat)) {
*exit_status = COST_REPEATING;
goto EXIT_asa;
}
} else {
index_cost_repeat = 0;
}
}
if (OPTIONS->Reanneal_Parameters == TRUE) {
OPTIONS->Locate_Cost = 3; /* reanneal parameters */
/* calculate tangents, not curvatures, to reanneal */
*curvature_flag = FALSE;
cost_derivatives (user_cost_function,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
maximum_tangent,
number_parameters,
parameter_type,
exit_status,
curvature_flag,
valid_state_generated_flag,
number_invalid_generated_states,
current_generated_state,
best_generated_state, ptr_asa_out, OPTIONS);
if (*exit_status == INVALID_COST_FUNCTION_DERIV) {
goto EXIT_asa;
}
}
#if USER_REANNEAL_COST
#else
if (OPTIONS->Reanneal_Cost == 0 || OPTIONS->Reanneal_Cost == 1) {
;
} else {
immediate_flag = OPTIONS->Immediate_Exit;
if (OPTIONS->Reanneal_Cost < -1) {
tmp_var_int = -OPTIONS->Reanneal_Cost;
} else {
tmp_var_int = OPTIONS->Reanneal_Cost;
}
tmp_var_db1 = ZERO;
tmp_var_db2 = ZERO;
for (index_cost_constraint = 0;
index_cost_constraint < tmp_var_int; ++index_cost_constraint) {
OPTIONS->Locate_Cost = 4; /* reanneal cost */
*number_invalid_generated_states = 0;
repeated_invalid_states = 0;
OPTIONS->Sequential_Parameters = *start_sequence - 1;
do {
#if ASA_EXIT_ANYTIME
if ((ptr_exit_anytime = fopen ("asa_exit_anytime", "r")) == NULL) {
*exit_status = IMMEDIATE_EXIT;
goto EXIT_asa;
} else {
fclose (ptr_exit_anytime);
}
#endif /* ASA_EXIT_ANYTIME */
++(*number_invalid_generated_states);
generate_flg = generate_new_state (user_random_generator,
seed,
parameter_minimum,
parameter_maximum,
current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp,
temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
current_generated_state,
last_saved_state, OPTIONS);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
#if ASA_QUEUE
if (OPTIONS->Queue_Size == 0) {
queue_new = 1;
} else {
queue_new = 1;
for (queue = 0; queue < save_queue; ++queue) {
save_queue_test = 0;
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
++save_queue_test;
} else {
queue_v =
index_v + queue * (LONG_INT) (*number_parameters);
if (
#if ASA_RESOLUTION
/* Coarse_Resolution used in current_generated_state */
fabs (current_generated_state->parameter[index_v] -
save_queue_param[queue_v]) < EPS_DOUBLE
#else
fabs (current_generated_state->parameter[index_v] -
save_queue_param[queue_v]) <
(OPTIONS->Queue_Resolution[index_v] + EPS_DOUBLE)
#endif /* ASA_RESOLUTION */
) {
++save_queue_test;
}
}
}
if (save_queue_test == *number_parameters) {
tmp_var_db = save_queue_cost[queue];
queue_new = 0;
*valid_state_generated_flag = save_queue_flag[queue];
if (*valid_state_generated_flag == FALSE) {
#if ASA_PRINT_MORE
#if INT_LONG
fprintf (ptr_asa_out,
"ASA_QUEUE: %ld \t previous invalid state",
OPTIONS->N_Generated);
#else
fprintf (ptr_asa_out,
"ASA_QUEUE: %d \t previous invalid state",
OPTIONS->N_Generated);
#endif
#endif /* ASA_PRINT_MORE */
} else {
#if ASA_PRINT_MORE
#if INT_LONG
fprintf (ptr_asa_out, "ASA_QUEUE: %ld \t %*.*g\n",
OPTIONS->N_Generated,
G_FIELD, G_PRECISION, tmp_var_db);
#else
fprintf (ptr_asa_out, "ASA_QUEUE: %d \t %*.*g\n",
OPTIONS->N_Generated,
G_FIELD, G_PRECISION, tmp_var_db);
#endif
#endif /* ASA_PRINT_MORE */
}
break;
}
}
}
if (queue_new == 1) {
tmp_var_db =
user_cost_function (current_generated_state->parameter,
parameter_minimum, parameter_maximum,
tangents, curvature, number_parameters,
parameter_type,
valid_state_generated_flag, exit_status,
OPTIONS);
if (cost_function_test
(tmp_var_db, current_generated_state->parameter,
parameter_minimum, parameter_maximum, number_parameters,
xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
if (OPTIONS->Queue_Size > 0) {
VFOR (index_v) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
continue;
}
queue_v = index_v + save_queue
* (LONG_INT) (*number_parameters);
save_queue_param[queue_v] =
current_generated_state->parameter[index_v];
}
save_queue_cost[save_queue] = tmp_var_db;
save_queue_flag[save_queue]
= *valid_state_generated_flag;
++save_queue;
if (save_queue == (LONG_INT) OPTIONS->Queue_Size)
--save_queue;
++save_queue_indx;
if (save_queue_indx == (LONG_INT) OPTIONS->Queue_Size)
save_queue_indx = 0;
}
}
#else /* ASA_QUEUE */
tmp_var_db =
user_cost_function (current_generated_state->parameter,
parameter_minimum, parameter_maximum,
tangents, curvature, number_parameters,
parameter_type, valid_state_generated_flag,
exit_status, OPTIONS);
if (cost_function_test
(tmp_var_db, current_generated_state->parameter,
parameter_minimum, parameter_maximum, number_parameters,
xnumber_parameters) == 0) {
*exit_status = INVALID_COST_FUNCTION;
goto EXIT_asa;
}
#endif /* ASA_QUEUE */
++repeated_invalid_states;
if (repeated_invalid_states >
OPTIONS->Limit_Invalid_Generated_States) {
*exit_status = TOO_MANY_INVALID_STATES;
goto EXIT_asa;
}
}
while (*valid_state_generated_flag == FALSE);
--(*number_invalid_generated_states);
tmp_var_db1 += tmp_var_db;
tmp_var_db2 += (tmp_var_db * tmp_var_db);
}
tmp_var_db1 /= (double) tmp_var_int;
tmp_var_db2 /= (double) tmp_var_int;
tmp_var_db =
sqrt (fabs
((tmp_var_db2 -
tmp_var_db1 * tmp_var_db1) * ((double) tmp_var_int /
((double) tmp_var_int -
ONE))));
if (OPTIONS->Reanneal_Cost < -1) {
*current_cost_temperature = *initial_cost_temperature =
tmp_var_db + (double) EPS_DOUBLE;
} else {
*initial_cost_temperature = tmp_var_db + (double) EPS_DOUBLE;
}
OPTIONS->Immediate_Exit = immediate_flag;
}
#endif /* USER_REANNEAL_COST */
reanneal (parameter_minimum,
parameter_maximum,
tangents,
maximum_tangent,
current_cost_temperature,
initial_cost_temperature,
temperature_scale_cost,
current_user_parameter_temp,
initial_user_parameter_temp,
temperature_scale_parameters,
number_parameters,
parameter_type,
index_cost_acceptances,
index_parameter_generations,
last_saved_state, best_generated_state, OPTIONS);
#if ASA_PRINT_INTERMED
#if ASA_PRINT
print_state (parameter_minimum,
parameter_maximum,
tangents,
curvature,
current_cost_temperature,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
curvature_flag,
number_accepted,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
last_saved_state,
best_generated_state, ptr_asa_out, OPTIONS);
fprintf (ptr_asa_out, "\n");
fflush (ptr_asa_out);
#endif
#endif
}
}
/* FINISHED ANNEALING and MINIMIZATION */
*exit_status = NORMAL_EXIT;
EXIT_asa:
asa_exit_value = asa_exit (user_cost_function,
&final_cost,
parameter_initial_final,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
maximum_tangent,
current_cost_temperature,
initial_user_parameter_temp,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status,
index_exit_v,
start_sequence,
number_accepted,
best_number_accepted_saved,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
index_parameter_generations,
best_number_generated_saved,
current_generated_state,
last_saved_state,
best_generated_state, ptr_asa_out, OPTIONS);
if (asa_exit_value == 9) {
*exit_status = CALLOC_FAILED;
return (-1);
}
free (curvature_flag);
free (maximum_tangent);
free (accepted_to_generated_ratio);
free (temperature_scale_cost);
free (current_cost_temperature);
free (initial_cost_temperature);
free (number_generated);
free (best_number_generated_saved);
free (recent_number_generated);
free (number_accepted);
free (recent_number_acceptances);
free (index_cost_acceptances);
free (number_acceptances_saved);
free (best_number_accepted_saved);
free (number_invalid_generated_states);
free (current_generated_state->parameter);
free (last_saved_state->parameter);
free (best_generated_state->parameter);
free (current_generated_state);
free (last_saved_state);
free (best_generated_state);
#if ASA_QUEUE
free (save_queue_flag);
free (save_queue_cost);
free (save_queue_param);
#endif /* ASA_QUEUE */
#if MULTI_MIN
for (multi_index = 0; multi_index <= OPTIONS->Multi_Number; ++multi_index)
free (multi_params[multi_index]);
free (multi_params);
free (multi_sort);
free (multi_cost);
#endif
#if ASA_PARALLEL
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
free (gener_block_state[i_prll].parameter);
}
free (gener_block_state);
free (parallel_sort);
free (parallel_gen_ratio_block);
free (generate_flg_par);
free (number_invalid_generated_states_par);
free (repeated_invalid_states_par);
free (tmp_var_db1_par);
free (tmp_var_db_par);
free (valid_state_generated_flag_par);
#if ASA_QUEUE
free (queue_par_cost);
for (i_prll = 0; i_prll < OPTIONS->Gener_Block_Max; ++i_prll) {
free (save_queue_param_par[i_prll]);
free (save_queue_valid_state_flag_par[i_prll]);
free (save_queue_cost_par[i_prll]);
}
free (save_queue_valid_state_flag_par);
free (save_queue_cost_par);
free (save_queue_param_par);
free (queue_new_par);
free (queue_v_par);
free (save_queue_indx_par);
free (save_queue_test_par);
free (save_queue_par);
#endif /* ASA_QUEUE */
#endif /* ASA_PARALLEL */
#if ASA_PIPE_FILE
fclose (ptr_asa_pipe);
#endif
free (initial_user_parameter_temp);
free (index_exit_v);
free (start_sequence);
free (index_parameter_generations);
free (current_user_parameter_temp);
free (temperature_scale_parameters);
if (recursive_asa_open == 0)
asa_open = FALSE;
return (final_cost);
}
/***********************************************************************
* asa_exit
* This procedures copies the best parameters and cost into
* final_cost and parameter_initial_final
***********************************************************************/
/* asa_exit: final bookkeeping for asa ().  Copies the best saved cost and
 * parameters into *final_cost and parameter_initial_final[], optionally
 * recomputes tangents/curvatures at the best state, prints the exit
 * diagnostics to the asa output file, then flushes and closes that file.
 * Always returns 0. */
#if HAVE_ANSI
int
asa_exit (double (*user_cost_function)
(double *, double *, double *, double *, double *, ALLOC_INT *,
int *, int *, int *, USER_DEFINES *), double *final_cost,
double *parameter_initial_final, double *parameter_minimum,
double *parameter_maximum, double *tangents, double *curvature,
double *maximum_tangent, double *current_cost_temperature,
double *initial_user_parameter_temp,
double *current_user_parameter_temp,
double *accepted_to_generated_ratio,
ALLOC_INT * number_parameters, int *parameter_type,
int *valid_state_generated_flag, int *exit_status,
ALLOC_INT * index_exit_v, ALLOC_INT * start_sequence,
LONG_INT * number_accepted,
LONG_INT * best_number_accepted_saved,
LONG_INT * index_cost_acceptances, LONG_INT * number_generated,
LONG_INT * number_invalid_generated_states,
LONG_INT * index_parameter_generations,
LONG_INT * best_number_generated_saved,
STATE * current_generated_state, STATE * last_saved_state,
STATE * best_generated_state, FILE * ptr_asa_out,
USER_DEFINES * OPTIONS)
#else
int
asa_exit (user_cost_function,
final_cost,
parameter_initial_final,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
maximum_tangent,
current_cost_temperature,
initial_user_parameter_temp,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status,
index_exit_v,
start_sequence,
number_accepted,
best_number_accepted_saved,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
index_parameter_generations,
best_number_generated_saved,
current_generated_state,
last_saved_state, best_generated_state, ptr_asa_out, OPTIONS)
double (*user_cost_function) ();
double *final_cost;
double *parameter_initial_final;
double *parameter_minimum;
double *parameter_maximum;
double *tangents;
double *curvature;
double *maximum_tangent;
double *current_cost_temperature;
double *initial_user_parameter_temp;
double *current_user_parameter_temp;
double *accepted_to_generated_ratio;
ALLOC_INT *number_parameters;
int *parameter_type;
int *valid_state_generated_flag;
int *exit_status;
ALLOC_INT *index_exit_v;
ALLOC_INT *start_sequence;
LONG_INT *number_accepted;
LONG_INT *best_number_accepted_saved;
LONG_INT *index_cost_acceptances;
LONG_INT *number_generated;
LONG_INT *number_invalid_generated_states;
LONG_INT *index_parameter_generations;
LONG_INT *best_number_generated_saved;
STATE *current_generated_state;
STATE *last_saved_state;
STATE *best_generated_state;
FILE *ptr_asa_out;
USER_DEFINES *OPTIONS;
#endif
{
ALLOC_INT index_v; /* iteration index */
int curvatureFlag;
int exit_exit_status, tmp_locate;
#if MULTI_MIN
int multi_index;
#endif
/* tmp_locate only exists to reference Locate_Cost in all configurations */
tmp_locate = OPTIONS->Locate_Cost;
if (tmp_locate) { /* stop compiler warning */
;
}
exit_exit_status = 0;
/* return final function minimum and associated parameters */
*final_cost = best_generated_state->cost;
VFOR (index_v) {
parameter_initial_final[index_v] =
best_generated_state->parameter[index_v];
}
/* expose the accepted/generated counts at the best state to the caller */
OPTIONS->N_Accepted = *best_number_accepted_saved;
OPTIONS->N_Generated = *best_number_generated_saved;
#if MULTI_MIN
/* report each of the OPTIONS->Multi_Number saved minima, best printed
 * last; NOTE: the matching closing brace of this for-loop is emitted in
 * the second #if MULTI_MIN section below, so the braces balance only
 * when MULTI_MIN is defined consistently throughout */
for (multi_index = OPTIONS->Multi_Number - 1; multi_index >= 0;
--multi_index) {
best_generated_state->cost = OPTIONS->Multi_Cost[multi_index];
VFOR (index_v) {
best_generated_state->parameter[index_v] =
OPTIONS->Multi_Params[multi_index][index_v];
}
#if ASA_PRINT
fprintf (ptr_asa_out, "\n\t\t multi_index = %d\n", multi_index);
#endif /* ASA_PRINT */
#endif /* MULTI_MIN */
/* skip derivative calculation for abnormal exits, where the best state
 * may not be valid enough to evaluate the cost function around */
if (*exit_status != TOO_MANY_INVALID_STATES
&& *exit_status != IMMEDIATE_EXIT
&& *exit_status != INVALID_USER_INPUT
&& *exit_status != INVALID_COST_FUNCTION
&& *exit_status != INVALID_COST_FUNCTION_DERIV) {
if (OPTIONS->Curvature_0 != TRUE)
OPTIONS->Locate_Cost = 5; /* calc curvatures while exiting asa */
/* calculate curvatures and tangents at best point */
curvatureFlag = TRUE;
cost_derivatives (user_cost_function,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
maximum_tangent,
number_parameters,
parameter_type,
&exit_exit_status,
&curvatureFlag,
valid_state_generated_flag,
number_invalid_generated_states,
current_generated_state,
best_generated_state, ptr_asa_out, OPTIONS);
}
#if ASA_PRINT
if (exit_exit_status == INVALID_COST_FUNCTION_DERIV)
fprintf (ptr_asa_out, "\n\n in asa_exit: INVALID_COST_FUNCTION_DERIV");
if (*exit_status != INVALID_USER_INPUT
&& *exit_status != INVALID_COST_FUNCTION
&& *exit_status != INVALID_COST_FUNCTION_DERIV)
print_state (parameter_minimum,
parameter_maximum,
tangents,
curvature,
current_cost_temperature,
current_user_parameter_temp,
accepted_to_generated_ratio,
number_parameters,
&curvatureFlag,
number_accepted,
index_cost_acceptances,
number_generated,
number_invalid_generated_states,
last_saved_state,
best_generated_state, ptr_asa_out, OPTIONS);
#endif /* ASA_PRINT */
#if MULTI_MIN
}
/* restore the overall best (index 0) minimum into best_generated_state */
best_generated_state->cost = OPTIONS->Multi_Cost[0];
VFOR (index_v) {
best_generated_state->parameter[index_v] =
OPTIONS->Multi_Params[0][index_v];
}
#endif /* MULTI_MIN */
#if ASA_PRINT
/* translate *exit_status into a human-readable message */
switch (*exit_status) {
case NORMAL_EXIT:
fprintf (ptr_asa_out,
"\n\n NORMAL_EXIT exit_status = %d\n", *exit_status);
break;
case P_TEMP_TOO_SMALL:
fprintf (ptr_asa_out,
"\n\n P_TEMP_TOO_SMALL exit_status = %d\n", *exit_status);
fprintf (ptr_asa_out,
#if INT_ALLOC
"current_user_parameter_temp[%d] too small = %*.*g\n",
#else
#if INT_LONG
"current_user_parameter_temp[%ld] too small = %*.*g\n",
#else
"current_user_parameter_temp[%d] too small = %*.*g\n",
#endif
#endif
*index_exit_v,
G_FIELD, G_PRECISION,
current_user_parameter_temp[*index_exit_v]);
break;
case C_TEMP_TOO_SMALL:
fprintf (ptr_asa_out,
"\n\n C_TEMP_TOO_SMALL exit_status = %d\n", *exit_status);
fprintf (ptr_asa_out,
"*current_cost_temperature too small = %*.*g\n",
G_FIELD, G_PRECISION, *current_cost_temperature);
break;
case COST_REPEATING:
fprintf (ptr_asa_out,
"\n\n COST_REPEATING exit_status = %d\n", *exit_status);
break;
case TOO_MANY_INVALID_STATES:
fprintf (ptr_asa_out,
"\n\n TOO_MANY_INVALID_STATES exit_status = %d\n",
*exit_status);
break;
case IMMEDIATE_EXIT:
fprintf (ptr_asa_out,
"\n\n IMMEDIATE_EXIT exit_status = %d\n", *exit_status);
break;
case INVALID_USER_INPUT:
fprintf (ptr_asa_out,
"\n\n INVALID_USER_INPUT exit_status = %d\n", *exit_status);
break;
case INVALID_COST_FUNCTION:
fprintf (ptr_asa_out,
"\n\n INVALID_COST_FUNCTION exit_status = %d\n", *exit_status);
break;
case INVALID_COST_FUNCTION_DERIV:
fprintf (ptr_asa_out,
"\n\n INVALID_COST_FUNCTION_DERIV exit_status = %d\n",
*exit_status);
break;
default:
fprintf (ptr_asa_out, "\n\n ERR: no exit code available = %d\n",
*exit_status);
}
/* report where in asa () the cost function was last evaluated */
switch (OPTIONS->Locate_Cost) {
case 0:
fprintf (ptr_asa_out,
" Locate_Cost = %d, initial cost temperature\n",
OPTIONS->Locate_Cost);
break;
case 1:
fprintf (ptr_asa_out,
" Locate_Cost = %d, initial cost value\n", OPTIONS->Locate_Cost);
break;
case 2:
fprintf (ptr_asa_out,
" Locate_Cost = %d, new generated state\n",
OPTIONS->Locate_Cost);
break;
case 12:
fprintf (ptr_asa_out,
" Locate_Cost = %d, new generated state just after a new best state\n",
OPTIONS->Locate_Cost);
break;
case 3:
fprintf (ptr_asa_out,
" Locate_Cost = %d, cost derivatives, reannealing parameters\n",
OPTIONS->Locate_Cost);
break;
case 4:
fprintf (ptr_asa_out,
" Locate_Cost = %d, reannealing cost temperature\n",
OPTIONS->Locate_Cost);
break;
case 5:
fprintf (ptr_asa_out,
" Locate_Cost = %d, calculating curvatures while exiting asa ()\n",
OPTIONS->Locate_Cost);
break;
case -1:
fprintf (ptr_asa_out,
" Locate_Cost = %d, exited main asa () loop by user-defined OPTIONS\n",
OPTIONS->Locate_Cost);
break;
default:
fprintf (ptr_asa_out,
" Locate_Cost = %d, no index available for Locate_Cost\n",
OPTIONS->Locate_Cost);
}
if (*exit_status != INVALID_USER_INPUT
&& *exit_status != INVALID_COST_FUNCTION
&& *exit_status != INVALID_COST_FUNCTION_DERIV) {
fprintf (ptr_asa_out,
"final_cost = best_generated_state->cost = %-*.*g\n",
G_FIELD, G_PRECISION, *final_cost);
#if INT_LONG
fprintf (ptr_asa_out,
"*number_accepted at best_generated_state->cost = %ld\n",
*best_number_accepted_saved);
fprintf (ptr_asa_out,
"*number_generated at best_generated_state->cost = %ld\n",
*best_number_generated_saved);
#else
fprintf (ptr_asa_out,
"*number_accepted at best_generated_state->cost = %d\n",
*best_number_accepted_saved);
fprintf (ptr_asa_out,
"*number_generated at best_generated_state->cost = %d\n",
*best_number_generated_saved);
#endif
}
#endif
#if ASA_TEMPLATE_SELFOPT
/* self-optimization hook: record the generated count for the caller */
if (OPTIONS->Asa_Data_Dbl[0] > (double) MIN_DOUBLE)
OPTIONS->Asa_Data_Dbl[1] = (double) (*best_number_generated_saved);
#endif
/* reset OPTIONS->Sequential_Parameters */
OPTIONS->Sequential_Parameters = *start_sequence;
#if ASA_PRINT
#if TIME_CALC
/* print ending time */
print_time ("asa_end", ptr_asa_out);
#endif
fprintf (ptr_asa_out, "\n\n\n");
#endif
/* NOTE(review): the output file is flushed and closed here
 * unconditionally; ptr_asa_out must not be used after asa_exit ()
 * returns */
fflush (ptr_asa_out);
fclose (ptr_asa_out);
return (0);
}
/***********************************************************************
* generate_new_state
* Generates a valid new state from the old state
***********************************************************************/
/* generate_new_state: draws a trial point from the ASA generating
 * distribution around last_saved_state and stores it in
 * current_generated_state->parameter[].  When
 * OPTIONS->Sequential_Parameters >= -1 on entry, only one parameter
 * (the next in sequence, with wraparound) is updated per call;
 * otherwise all parameters are swept — presumably driven by the asa ()
 * main loop, confirm against the caller.  Returns 0. */
#if HAVE_ANSI
int
generate_new_state (double (*user_random_generator) (LONG_INT *),
LONG_INT * seed,
double *parameter_minimum,
double *parameter_maximum,
double *current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
double *initial_user_parameter_temp,
double *temperature_scale_parameters,
#endif
ALLOC_INT * number_parameters,
int *parameter_type,
STATE * current_generated_state,
STATE * last_saved_state, USER_DEFINES * OPTIONS)
#else
int
generate_new_state (user_random_generator,
seed,
parameter_minimum,
parameter_maximum, current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp, temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
current_generated_state, last_saved_state, OPTIONS)
double (*user_random_generator) ();
LONG_INT *seed;
double *parameter_minimum;
double *parameter_maximum;
double *current_user_parameter_temp;
#if USER_GENERATING_FUNCTION
double *initial_user_parameter_temp;
double *temperature_scale_parameters;
#endif
ALLOC_INT *number_parameters;
int *parameter_type;
STATE *current_generated_state;
STATE *last_saved_state;
USER_DEFINES *OPTIONS;
#endif
{
ALLOC_INT index_v;
double x;
double parameter_v, min_parameter_v, max_parameter_v, temperature_v,
parameter_range_v;
#if USER_GENERATING_FUNCTION
double init_param_temp_v;
double temp_scale_params_v;
#endif
#if ASA_RESOLUTION
double xres, xint, xminus, xplus, dx, dxminus, dxplus;
#endif
/* generate a new value for each parameter */
VFOR (index_v) {
/* sequential mode: advance to the next parameter index (wrapping at
 * *number_parameters) and generate only that parameter this call */
if (OPTIONS->Sequential_Parameters >= -1) {
++OPTIONS->Sequential_Parameters;
if (OPTIONS->Sequential_Parameters == *number_parameters)
OPTIONS->Sequential_Parameters = 0;
index_v = OPTIONS->Sequential_Parameters;
}
min_parameter_v = parameter_minimum[index_v];
max_parameter_v = parameter_maximum[index_v];
parameter_range_v = max_parameter_v - min_parameter_v;
/* ignore parameters that have too small a range */
if (fabs (parameter_range_v) < (double) EPS_DOUBLE)
continue;
temperature_v = current_user_parameter_temp[index_v];
#if USER_GENERATING_FUNCTION
init_param_temp_v = initial_user_parameter_temp[index_v];
temp_scale_params_v = temperature_scale_parameters[index_v];
#endif
parameter_v = last_saved_state->parameter[index_v];
/* Handle discrete parameters. */
#if ASA_RESOLUTION
/* widen the bounds by half a resolution step — presumably so the
 * snapped value can still reach the end bins; TODO confirm */
xres = OPTIONS->Coarse_Resolution[index_v];
if (xres > EPS_DOUBLE) {
min_parameter_v -= (xres / TWO);
max_parameter_v += (xres / TWO);
parameter_range_v = max_parameter_v - min_parameter_v;
}
#endif /* ASA_RESOLUTION */
if (INTEGER_PARAMETER (index_v)) {
#if ASA_RESOLUTION
if (xres > EPS_DOUBLE) {
;
} else {
#endif /* ASA_RESOLUTION */
/* widen by HALF — presumably so each integer value gets an
 * equal-width generation interval; TODO confirm */
min_parameter_v -= HALF;
max_parameter_v += HALF;
parameter_range_v = max_parameter_v - min_parameter_v;
/* with ASA_RESOLUTION defined, this brace closes the else above;
 * without it, the if (INTEGER_PARAMETER ...) block */
}
#if ASA_RESOLUTION
}
#endif
/* generate a new state x within the parameter bounds */
/* rejection loop: keep drawing until x falls within the (possibly
 * widened) valid range */
for (;;) {
#if USER_GENERATING_FUNCTION
x = OPTIONS->Generating_Distrib (seed,
number_parameters,
index_v,
temperature_v,
init_param_temp_v,
temp_scale_params_v,
parameter_v,
parameter_range_v,
last_saved_state->parameter, OPTIONS);
#else
x = parameter_v
+ generate_asa_state (user_random_generator, seed, &temperature_v)
* parameter_range_v;
#endif /* USER_GENERATING_FUNCTION */
#if ASA_RESOLUTION
/* snap x to the nearest multiple of the coarse resolution xres */
if (xres > EPS_DOUBLE) {
xint = xres * (double) ((LONG_INT) (x / xres));
xplus = xint + xres;
xminus = xint - xres;
dx = fabs (xint - x);
dxminus = fabs (xminus - x);
dxplus = fabs (xplus - x);
if (dx < dxminus && dx < dxplus)
x = xint;
else if (dxminus < dxplus)
x = xminus;
else
x = xplus;
}
#endif /* ASA_RESOLUTION */
/* exit the loop if within its valid parameter range */
if (x <= max_parameter_v - (double) EPS_DOUBLE
&& x >= min_parameter_v + (double) EPS_DOUBLE)
break;
}
/* Handle discrete parameters.
You might have to check rounding on your machine. */
if (INTEGER_PARAMETER (index_v)) {
#if ASA_RESOLUTION
if (xres > EPS_DOUBLE) {
;
} else {
#endif /* ASA_RESOLUTION */
/* clamp into the widened range, round to the nearest integer
 * (separate cases avoid truncation bias for negative x), then
 * clamp to the true parameter bounds */
if (x < min_parameter_v + HALF)
x = min_parameter_v + HALF + (double) EPS_DOUBLE;
if (x > max_parameter_v - HALF)
x = max_parameter_v - HALF + (double) EPS_DOUBLE;
if (x + HALF > ZERO) {
x = (double) ((LONG_INT) (x + HALF));
} else {
x = (double) ((LONG_INT) (x - HALF));
}
if (x > parameter_maximum[index_v])
x = parameter_maximum[index_v];
if (x < parameter_minimum[index_v])
x = parameter_minimum[index_v];
}
#if ASA_RESOLUTION
}
/* analogous clamping for resolution-snapped parameters */
if (xres > EPS_DOUBLE) {
if (x < min_parameter_v + xres / TWO)
x = min_parameter_v + xres / TWO + (double) EPS_DOUBLE;
if (x > max_parameter_v - xres / TWO)
x = max_parameter_v - xres / TWO + (double) EPS_DOUBLE;
if (x > parameter_maximum[index_v])
x = parameter_maximum[index_v];
if (x < parameter_minimum[index_v])
x = parameter_minimum[index_v];
}
#endif /* ASA_RESOLUTION */
/* save the newly generated value */
current_generated_state->parameter[index_v] = x;
/* sequential mode: only one parameter is updated per call */
if (OPTIONS->Sequential_Parameters >= 0)
break;
}
return (0);
}
#if ASA_PARALLEL
/***********************************************************************
* generate_new_state_par
*	Generates a valid new state from the old state
***********************************************************************/
/* Parallel variant of generate_new_state: identical generation logic,
 * but the trial point is written into gener_block_state[i_prll]
 * (one slot per parallel generation index) instead of
 * current_generated_state.  Returns 0. */
#if HAVE_ANSI
int
generate_new_state_par (double (*user_random_generator) (LONG_INT *),
LONG_INT * seed,
double *parameter_minimum,
double *parameter_maximum,
double *current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
double *initial_user_parameter_temp,
double *temperature_scale_parameters,
#endif
ALLOC_INT * number_parameters,
int *parameter_type,
LONG_INT i_prll,
STATE * gener_block_state,
STATE * last_saved_state, USER_DEFINES * OPTIONS)
#else
int
generate_new_state_par (user_random_generator,
seed,
parameter_minimum,
parameter_maximum, current_user_parameter_temp,
#if USER_GENERATING_FUNCTION
initial_user_parameter_temp,
temperature_scale_parameters,
#endif
number_parameters,
parameter_type,
i_prll, gener_block_state, last_saved_state, OPTIONS)
double (*user_random_generator) ();
LONG_INT *seed;
double *parameter_minimum;
double *parameter_maximum;
double *current_user_parameter_temp;
#if USER_GENERATING_FUNCTION
double *initial_user_parameter_temp;
double *temperature_scale_parameters;
#endif
ALLOC_INT *number_parameters;
int *parameter_type;
LONG_INT i_prll;
STATE *gener_block_state;
STATE *last_saved_state;
USER_DEFINES *OPTIONS;
#endif
{
ALLOC_INT index_v;
double x;
double parameter_v, min_parameter_v, max_parameter_v, temperature_v,
parameter_range_v;
#if USER_GENERATING_FUNCTION
double init_param_temp_v;
double temp_scale_params_v;
#endif
#if ASA_RESOLUTION
double xres, xint, xminus, xplus, dx, dxminus, dxplus;
#endif
/* generate a new value for each parameter */
VFOR (index_v) {
/* sequential mode: advance to the next parameter index (wrapping at
 * *number_parameters) and generate only that parameter this call */
if (OPTIONS->Sequential_Parameters >= -1) {
++OPTIONS->Sequential_Parameters;
if (OPTIONS->Sequential_Parameters == *number_parameters)
OPTIONS->Sequential_Parameters = 0;
index_v = OPTIONS->Sequential_Parameters;
}
min_parameter_v = parameter_minimum[index_v];
max_parameter_v = parameter_maximum[index_v];
parameter_range_v = max_parameter_v - min_parameter_v;
/* ignore parameters that have too small a range */
if (fabs (parameter_range_v) < (double) EPS_DOUBLE)
continue;
temperature_v = current_user_parameter_temp[index_v];
#if USER_GENERATING_FUNCTION
init_param_temp_v = initial_user_parameter_temp[index_v];
temp_scale_params_v = temperature_scale_parameters[index_v];
#endif
parameter_v = last_saved_state->parameter[index_v];
/* Handle discrete parameters. */
#if ASA_RESOLUTION
/* widen the bounds by half a resolution step — presumably so the
 * snapped value can still reach the end bins; TODO confirm */
xres = OPTIONS->Coarse_Resolution[index_v];
if (xres > EPS_DOUBLE) {
min_parameter_v -= (xres / TWO);
max_parameter_v += (xres / TWO);
parameter_range_v = max_parameter_v - min_parameter_v;
}
#endif /* ASA_RESOLUTION */
if (INTEGER_PARAMETER (index_v)) {
#if ASA_RESOLUTION
if (xres > EPS_DOUBLE) {
;
} else {
#endif /* ASA_RESOLUTION */
/* widen by HALF — presumably so each integer value gets an
 * equal-width generation interval; TODO confirm */
min_parameter_v -= HALF;
max_parameter_v += HALF;
parameter_range_v = max_parameter_v - min_parameter_v;
/* with ASA_RESOLUTION defined, this brace closes the else above;
 * without it, the if (INTEGER_PARAMETER ...) block */
}
#if ASA_RESOLUTION
}
#endif
/* generate a new state x within the parameter bounds */
/* rejection loop: keep drawing until x falls within the (possibly
 * widened) valid range */
for (;;) {
#if USER_GENERATING_FUNCTION
x = OPTIONS->Generating_Distrib (seed,
number_parameters,
index_v,
temperature_v,
init_param_temp_v,
temp_scale_params_v,
parameter_v,
parameter_range_v,
last_saved_state->parameter, OPTIONS);
#else
x = parameter_v
+ generate_asa_state (user_random_generator, seed, &temperature_v)
* parameter_range_v;
#endif /* USER_GENERATING_FUNCTION */
#if ASA_RESOLUTION
/* snap x to the nearest multiple of the coarse resolution xres */
if (xres > EPS_DOUBLE) {
xint = xres * (double) ((LONG_INT) (x / xres));
xplus = xint + xres;
xminus = xint - xres;
dx = fabs (xint - x);
dxminus = fabs (xminus - x);
dxplus = fabs (xplus - x);
if (dx < dxminus && dx < dxplus)
x = xint;
else if (dxminus < dxplus)
x = xminus;
else
x = xplus;
}
#endif /* ASA_RESOLUTION */
/* exit the loop if within its valid parameter range */
if (x <= max_parameter_v - (double) EPS_DOUBLE
&& x >= min_parameter_v + (double) EPS_DOUBLE)
break;
}
/* Handle discrete parameters.
You might have to check rounding on your machine. */
if (INTEGER_PARAMETER (index_v)) {
#if ASA_RESOLUTION
if (xres > EPS_DOUBLE) {
;
} else {
#endif /* ASA_RESOLUTION */
/* clamp into the widened range, round to the nearest integer
 * (separate cases avoid truncation bias for negative x), then
 * clamp to the true parameter bounds */
if (x < min_parameter_v + HALF)
x = min_parameter_v + HALF + (double) EPS_DOUBLE;
if (x > max_parameter_v - HALF)
x = max_parameter_v - HALF + (double) EPS_DOUBLE;
if (x + HALF > ZERO) {
x = (double) ((LONG_INT) (x + HALF));
} else {
x = (double) ((LONG_INT) (x - HALF));
}
if (x > parameter_maximum[index_v])
x = parameter_maximum[index_v];
if (x < parameter_minimum[index_v])
x = parameter_minimum[index_v];
}
#if ASA_RESOLUTION
}
/* analogous clamping for resolution-snapped parameters */
if (xres > EPS_DOUBLE) {
if (x < min_parameter_v + xres / TWO)
x = min_parameter_v + xres / TWO + (double) EPS_DOUBLE;
if (x > max_parameter_v - xres / TWO)
x = max_parameter_v - xres / TWO + (double) EPS_DOUBLE;
if (x > parameter_maximum[index_v])
x = parameter_maximum[index_v];
if (x < parameter_minimum[index_v])
x = parameter_minimum[index_v];
}
#endif /* ASA_RESOLUTION */
/* save the newly generated value into this parallel block slot */
gener_block_state[i_prll].parameter[index_v] = x;
/* sequential mode: only one parameter is updated per call */
if (OPTIONS->Sequential_Parameters >= 0)
break;
}
return (0);
}
#endif /* ASA_PARALLEL */
/***********************************************************************
* generate_asa_state
* This function generates a single value according to the
* ASA generating function and the passed temperature
***********************************************************************/
/* generate_asa_state: returns one deviate z from the ASA generating
 * distribution at temperature *temp, using a single call to the
 * user-supplied uniform random generator.  z = sgn * T *
 * ((1 + 1/T)^|2u - 1| - 1), where u is uniform and sgn is +/-1
 * depending on which side of HALF u falls. */
#if HAVE_ANSI
double
generate_asa_state (double (*user_random_generator) (LONG_INT *),
LONG_INT * seed, double *temp)
#else
double
generate_asa_state (user_random_generator, seed, temp)
double (*user_random_generator) ();
LONG_INT *seed;
double *temp;
#endif
{
double uniform_deviate;
double sign_term;
double temperature;
/* one uniform draw decides both the sign and the magnitude */
uniform_deviate = (*user_random_generator) (seed);
temperature = *temp;
if (uniform_deviate < HALF)
sign_term = -ONE;
else
sign_term = ONE;
/* |2u - 1| spans [0,1]; the power term maps it onto the
 * temperature-scaled generating distribution */
return (sign_term * temperature
* (F_POW (ONE + ONE / temperature,
fabs (TWO * uniform_deviate - ONE)) - ONE));
}
/***********************************************************************
* accept_new_state
* This procedure accepts or rejects a newly generated state,
* depending on whether the difference between new and old
* cost functions passes a statistical test. If accepted,
* the current state is updated.
***********************************************************************/
#if HAVE_ANSI
void
accept_new_state (double (*user_random_generator) (LONG_INT *),
                  LONG_INT * seed,
                  double *parameter_minimum,
                  double *parameter_maximum, double *current_cost_temperature,
#if ASA_SAMPLE
                  double *current_user_parameter_temp,
#endif
                  ALLOC_INT * number_parameters,
                  LONG_INT * recent_number_acceptances,
                  LONG_INT * number_accepted,
                  LONG_INT * index_cost_acceptances,
                  LONG_INT * number_acceptances_saved,
                  LONG_INT * recent_number_generated,
                  LONG_INT * number_generated,
                  LONG_INT * index_parameter_generations,
                  STATE * current_generated_state, STATE * last_saved_state,
#if ASA_SAMPLE
                  FILE * ptr_asa_out,
#endif
                  USER_DEFINES * OPTIONS)
#else
void
accept_new_state (user_random_generator,
                  seed,
                  parameter_minimum,
                  parameter_maximum, current_cost_temperature,
#if ASA_SAMPLE
                  current_user_parameter_temp,
#endif
                  number_parameters,
                  recent_number_acceptances,
                  number_accepted,
                  index_cost_acceptances,
                  number_acceptances_saved,
                  recent_number_generated,
                  number_generated,
                  index_parameter_generations,
                  current_generated_state, last_saved_state,
#if ASA_SAMPLE
                  ptr_asa_out,
#endif
                  OPTIONS)
double (*user_random_generator) ();
LONG_INT *seed;
double *parameter_minimum;
double *parameter_maximum;
double *current_cost_temperature;
#if ASA_SAMPLE
double *current_user_parameter_temp;
#endif
ALLOC_INT *number_parameters;
LONG_INT *recent_number_acceptances;
LONG_INT *number_accepted;
LONG_INT *index_cost_acceptances;
LONG_INT *number_acceptances_saved;
LONG_INT *recent_number_generated;
LONG_INT *number_generated;
LONG_INT *index_parameter_generations;
STATE *current_generated_state;
STATE *last_saved_state;
#if ASA_SAMPLE
FILE *ptr_asa_out;
#endif
USER_DEFINES *OPTIONS;
#endif
{
#if USER_ACCEPTANCE_TEST
#else
  double delta_cost;            /* cost difference scaled by the cost temperature */
#if USER_ACCEPT_ASYMP_EXP
  double q;                     /* asymptotic-exponential (Tsallis-like) exponent */
#endif
#endif
  double prob_test, unif_test;  /* accept when prob_test >= unif_test */
  double curr_cost_temp;
  ALLOC_INT index_v;
#if ASA_SAMPLE
  LONG_INT active_params;
  double weight_param_ind, weight_aver, range;
#endif
  /* update accepted and generated count */
  ++*number_acceptances_saved;
  ++*recent_number_generated;
  ++*number_generated;
  OPTIONS->N_Generated = *number_generated;
  /* increment the parameter index generation for each parameter:
     either the single sequentially selected one, or all of them */
  if (OPTIONS->Sequential_Parameters >= 0) {
    /* ignore parameters with too small a range */
    if (!PARAMETER_RANGE_TOO_SMALL (OPTIONS->Sequential_Parameters))
      ++(index_parameter_generations[OPTIONS->Sequential_Parameters]);
  } else {
    VFOR (index_v) {
      if (!PARAMETER_RANGE_TOO_SMALL (index_v))
        ++(index_parameter_generations[index_v]);
    }
  }
  /* effective cost function for testing acceptance criteria,
     calculate the cost difference and divide by the temperature */
  curr_cost_temp = *current_cost_temperature;
#if USER_ACCEPTANCE_TEST
  /* the user decides acceptance: unif_test is forced to ZERO (accept)
     or ONE (reject) and compared against prob_test = Prob_Bias below */
  if (OPTIONS->Cost_Acceptance_Flag == TRUE) {
    if (OPTIONS->User_Acceptance_Flag == TRUE) {
      unif_test = ZERO;
      OPTIONS->User_Acceptance_Flag = FALSE;
      OPTIONS->Cost_Acceptance_Flag = FALSE;
    } else {
      unif_test = ONE;
      OPTIONS->Cost_Acceptance_Flag = FALSE;
    }
  } else {
    OPTIONS->Acceptance_Test (current_generated_state->cost,
                              parameter_minimum,
                              parameter_maximum, number_parameters, OPTIONS);
    if (OPTIONS->User_Acceptance_Flag == TRUE) {
      unif_test = ZERO;
      OPTIONS->User_Acceptance_Flag = FALSE;
    } else {
      unif_test = ONE;
    }
  }
  prob_test = OPTIONS->Prob_Bias;
#else /* USER_ACCEPTANCE_TEST */
#if USER_COST_SCHEDULE
  curr_cost_temp =
    (OPTIONS->Cost_Schedule (*current_cost_temperature, OPTIONS)
     + (double) EPS_DOUBLE);
#endif
  delta_cost = (current_generated_state->cost - last_saved_state->cost)
    / (curr_cost_temp + (double) EPS_DOUBLE);
#if USER_ACCEPT_ASYMP_EXP
  /* asymptotic-exponential acceptance; fall back to the Boltzmann
     form when q -> 1 or the power base would become non-positive */
  q = OPTIONS->Asymp_Exp_Param;
  if (fabs (ONE - q) < (double) EPS_DOUBLE)
    prob_test = MIN (ONE, (F_EXP (EXPONENT_CHECK (-delta_cost))));
  else if ((ONE - (ONE - q) * delta_cost) < (double) EPS_DOUBLE)
    prob_test = MIN (ONE, (F_EXP (EXPONENT_CHECK (-delta_cost))));
  else
    prob_test = MIN (ONE, F_POW ((ONE - (ONE - q) * delta_cost),
                                 (ONE / (ONE - q))));
#else /* USER_ACCEPT_ASYMP_EXP */
#if USER_ACCEPT_THRESHOLD /* USER_ACCEPT_THRESHOLD */
  /* deterministic threshold acceptance */
  prob_test = delta_cost <= 1.0 ? 1.0 : 0.0;
#else /* Metropolis */
  prob_test = MIN (ONE, (F_EXP (EXPONENT_CHECK (-delta_cost))));
#endif /* USER_ACCEPT_THRESHOLD */
#endif /* USER_ACCEPT_ASYMP_EXP */
  unif_test = (*user_random_generator) (seed);
#endif /* USER_ACCEPTANCE_TEST */
#if ASA_SAMPLE
  /* accumulate per-parameter importance weights for sampling output */
  active_params = 0;
  weight_aver = ZERO;
  VFOR (index_v) {
    /* ignore parameters with too small a range */
    if (PARAMETER_RANGE_TOO_SMALL (index_v))
      continue;
    ++active_params;
    range = parameter_maximum[index_v] - parameter_minimum[index_v];
    weight_param_ind = TWO * (fabs ((last_saved_state->parameter[index_v]
                                     -
                                     current_generated_state->parameter
                                     [index_v]) / range)
                              + current_user_parameter_temp[index_v])
      * F_LOG (ONE + ONE / current_user_parameter_temp[index_v]);
    weight_aver += weight_param_ind;
    OPTIONS->Bias_Generated[index_v] = ONE / weight_param_ind;
  }
  weight_aver /= (double) active_params;
  OPTIONS->Average_Weights = weight_aver;
  if (prob_test >= unif_test) {
    OPTIONS->Bias_Acceptance = prob_test;
  } else {
    OPTIONS->Bias_Acceptance = ONE - prob_test;
  }
#if ASA_PRINT
  if (OPTIONS->Limit_Weights < OPTIONS->Average_Weights) {
    fprintf (ptr_asa_out, ":SAMPLE#\n");
    if (prob_test >= unif_test) {
      fprintf (ptr_asa_out,
#if INT_LONG
               ":SAMPLE+ %10ld %*.*g %*.*g %*.*g %*.*g\n",
#else
               /* fixed: four double arguments follow, so four %*.*g
                  conversions are required here as in the branch above
                  (the original dropped the Average_Weights field) */
               ":SAMPLE+ %10d %*.*g %*.*g %*.*g %*.*g\n",
#endif
               OPTIONS->N_Accepted,
               G_FIELD, G_PRECISION, current_generated_state->cost,
               G_FIELD, G_PRECISION, *current_cost_temperature,
               G_FIELD, G_PRECISION, OPTIONS->Bias_Acceptance,
               G_FIELD, G_PRECISION, OPTIONS->Average_Weights);
      VFOR (index_v) {
        /* ignore parameters with too small a range */
        if (PARAMETER_RANGE_TOO_SMALL (index_v))
          continue;
        range = parameter_maximum[index_v] - parameter_minimum[index_v];
        fprintf (ptr_asa_out,
#if INT_ALLOC
                 ":SAMPLE %11d %*.*g %*.*g %*.*g %*.*g\n",
#else
#if INT_LONG
                 ":SAMPLE %11ld %*.*g %*.*g %*.*g %*.*g\n",
#else
                 ":SAMPLE %11d %*.*g %*.*g %*.*g %*.*g\n",
#endif
#endif
                 index_v,
                 G_FIELD, G_PRECISION,
                 current_generated_state->parameter[index_v], G_FIELD,
                 G_PRECISION, current_user_parameter_temp[index_v],
                 G_FIELD, G_PRECISION, OPTIONS->Bias_Generated[index_v],
                 G_FIELD, G_PRECISION, range);
      }
    } else {
      fprintf (ptr_asa_out,
#if INT_LONG
               ":SAMPLE %11ld %*.*g %*.*g %*.*g %*.*g\n",
#else
               /* fixed: four double arguments follow, so four %*.*g
                  conversions are required here as in the branch above
                  (the original dropped the Average_Weights field) */
               ":SAMPLE %11d %*.*g %*.*g %*.*g %*.*g\n",
#endif
               OPTIONS->N_Accepted,
               G_FIELD, G_PRECISION, last_saved_state->cost,
               G_FIELD, G_PRECISION, *current_cost_temperature,
               G_FIELD, G_PRECISION, OPTIONS->Bias_Acceptance,
               G_FIELD, G_PRECISION, OPTIONS->Average_Weights);
      VFOR (index_v) {
        /* ignore parameters with too small a range */
        if (PARAMETER_RANGE_TOO_SMALL (index_v))
          continue;
        range = parameter_maximum[index_v] - parameter_minimum[index_v];
        fprintf (ptr_asa_out,
#if INT_ALLOC
                 ":SAMPLE %11d %*.*g %*.*g %*.*g %*.*g\n",
#else
#if INT_LONG
                 ":SAMPLE %11ld %*.*g %*.*g %*.*g %*.*g\n",
#else
                 ":SAMPLE %11d %*.*g %*.*g %*.*g %*.*g\n",
#endif
#endif
                 index_v,
                 G_FIELD, G_PRECISION,
                 last_saved_state->parameter[index_v], G_FIELD,
                 G_PRECISION, current_user_parameter_temp[index_v],
                 G_FIELD, G_PRECISION, OPTIONS->Bias_Generated[index_v],
                 G_FIELD, G_PRECISION, range);
      }
    }
  }
#endif
#endif /* ASA_SAMPLE */
  /* accept/reject the new state */
  if (prob_test >= unif_test) {
    /* copy current state to the last saved state */
    last_saved_state->cost = current_generated_state->cost;
    VFOR (index_v) {
      /* ignore parameters with too small a range */
      if (PARAMETER_RANGE_TOO_SMALL (index_v))
        continue;
      last_saved_state->parameter[index_v] =
        current_generated_state->parameter[index_v];
    }
    /* update acceptance counts */
    ++*recent_number_acceptances;
    ++*number_accepted;
    ++*index_cost_acceptances;
    *number_acceptances_saved = *number_accepted;
    OPTIONS->N_Accepted = *number_accepted;
  }
}
/***********************************************************************
* reanneal
* Readjust temperatures of generating and acceptance functions
***********************************************************************/
#if HAVE_ANSI
void
reanneal (double *parameter_minimum,
          double *parameter_maximum,
          double *tangents,
          double *maximum_tangent,
          double *current_cost_temperature,
          double *initial_cost_temperature,
          double *temperature_scale_cost,
          double *current_user_parameter_temp,
          double *initial_user_parameter_temp,
          double *temperature_scale_parameters,
          ALLOC_INT * number_parameters,
          int *parameter_type,
          LONG_INT * index_cost_acceptances,
          LONG_INT * index_parameter_generations,
          STATE * last_saved_state,
          STATE * best_generated_state, USER_DEFINES * OPTIONS)
#else
void
reanneal (parameter_minimum,
          parameter_maximum,
          tangents,
          maximum_tangent,
          current_cost_temperature,
          initial_cost_temperature,
          temperature_scale_cost,
          current_user_parameter_temp,
          initial_user_parameter_temp,
          temperature_scale_parameters,
          number_parameters,
          parameter_type,
          index_cost_acceptances,
          index_parameter_generations,
          last_saved_state, best_generated_state, OPTIONS)
double *parameter_minimum;
double *parameter_maximum;
double *tangents;
double *maximum_tangent;
double *current_cost_temperature;
double *initial_cost_temperature;
double *temperature_scale_cost;
double *current_user_parameter_temp;
double *initial_user_parameter_temp;
double *temperature_scale_parameters;
ALLOC_INT *number_parameters;
int *parameter_type;
LONG_INT *index_cost_acceptances;
LONG_INT *index_parameter_generations;
STATE *last_saved_state;
STATE *best_generated_state;
USER_DEFINES *OPTIONS;
#endif
{
  /* Rescale the parameter-generation indices (and, when needed, the
     initial parameter temperatures) using the cost-function tangents,
     then rescale the cost-acceptance index and cost temperatures. */
  ALLOC_INT index_v;
  int cost_test;
  double tmp_var_db3;
  double new_temperature;
  double log_new_temperature_ratio;
  double log_init_cur_temp_ratio;
  double temperature_rescale_power;
  double cost_best, cost_last;
  double tmp_dbl, tmp_dbl1;
  /* effective number of parameters; assumed to be filled in by
     cost_function_test via its last argument -- TODO confirm */
  double xnumber_parameters[1];
  /* NOTE(review): the cost_test value from this call is only read in
     the Reanneal_Cost branch below, where it may be overwritten */
  cost_test = cost_function_test (last_saved_state->cost,
                                  last_saved_state->parameter,
                                  parameter_minimum,
                                  parameter_maximum, number_parameters,
                                  xnumber_parameters);
  if (OPTIONS->Reanneal_Parameters == TRUE) {
    VFOR (index_v) {
      if (NO_REANNEAL (index_v))
        continue;
      /* use the temp double to prevent overflow */
      tmp_dbl = (double) index_parameter_generations[index_v];
      /* skip parameters with too small range or integer parameters */
      if (OPTIONS->Include_Integer_Parameters == TRUE) {
        if (PARAMETER_RANGE_TOO_SMALL (index_v))
          continue;
      } else {
        if (PARAMETER_RANGE_TOO_SMALL (index_v) ||
            INTEGER_PARAMETER (index_v))
          continue;
      }
      /* ignore parameters with too small tangents */
      if (fabs (tangents[index_v]) < (double) EPS_DOUBLE)
        continue;
      /* reset the index of parameter generations appropriately */
#if USER_REANNEAL_PARAMETERS
      new_temperature =
        fabs (OPTIONS->Reanneal_Params_Function (current_user_parameter_temp
                                                 [index_v], tangents[index_v],
                                                 *maximum_tangent, OPTIONS));
#else
      new_temperature =
        fabs (FUNCTION_REANNEAL_PARAMS
              (current_user_parameter_temp[index_v], tangents[index_v],
               *maximum_tangent));
#endif
      /* invert the annealing schedule: compute the generation index
         that would have produced new_temperature from the initial one */
      if (new_temperature < initial_user_parameter_temp[index_v]) {
        log_init_cur_temp_ratio =
          fabs (F_LOG (((double) EPS_DOUBLE
                        + initial_user_parameter_temp[index_v])
                       / ((double) EPS_DOUBLE + new_temperature)));
        tmp_dbl = (double) EPS_DOUBLE
          + F_POW (log_init_cur_temp_ratio
                   / temperature_scale_parameters[index_v],
                   *xnumber_parameters
#if QUENCH_PARAMETERS
                   / OPTIONS->User_Quench_Param_Scale[index_v]);
#else
          );
#endif
      } else {
        tmp_dbl = ONE;
      }
      /* Reset index_parameter_generations if index reset too large,
         and also reset the initial_user_parameter_temp, to achieve
         the same new temperature. */
      while (tmp_dbl > ((double) MAXIMUM_REANNEAL_INDEX)) {
        log_new_temperature_ratio =
          -temperature_scale_parameters[index_v] * F_POW (tmp_dbl,
#if QUENCH_PARAMETERS
                                                          OPTIONS->
                                                          User_Quench_Param_Scale
                                                          [index_v]
#else
                                                          ONE
#endif
                                                          /
                                                          *xnumber_parameters);
        log_new_temperature_ratio =
          EXPONENT_CHECK (log_new_temperature_ratio);
        new_temperature =
          initial_user_parameter_temp[index_v] *
          F_EXP (log_new_temperature_ratio);
        /* shrink the index and compensate by rescaling the initial
           temperature so the current temperature is unchanged */
        tmp_dbl /= (double) REANNEAL_SCALE;
        temperature_rescale_power = ONE / F_POW ((double) REANNEAL_SCALE,
#if QUENCH_PARAMETERS
                                                 OPTIONS->
                                                 User_Quench_Param_Scale
                                                 [index_v]
#else
                                                 ONE
#endif
                                                 / *xnumber_parameters);
        initial_user_parameter_temp[index_v] =
          new_temperature * F_POW (initial_user_parameter_temp[index_v] /
                                   new_temperature,
                                   temperature_rescale_power);
      }
      /* restore from temporary double */
      index_parameter_generations[index_v] = (LONG_INT) tmp_dbl;
    }
  }
  if (OPTIONS->Reanneal_Cost == 0) {
    ;                           /* cost reannealing disabled */
  } else if (OPTIONS->Reanneal_Cost < -1) {
    *index_cost_acceptances = 1;
  } else {
    /* reanneal : Reset the current cost temp and rescale the
       index of cost acceptances. */
    cost_best = best_generated_state->cost;
    cost_last = last_saved_state->cost;
#if USER_REANNEAL_COST
    cost_test = OPTIONS->Reanneal_Cost_Function (&cost_best,
                                                 &cost_last,
                                                 initial_cost_temperature,
                                                 current_cost_temperature,
                                                 OPTIONS);
    tmp_dbl1 = *current_cost_temperature;
#else
    cost_test = TRUE;
    if (OPTIONS->Reanneal_Cost == 1) {
      /* (re)set the initial cost_temperature */
      tmp_dbl = MAX (fabs (cost_last), fabs (cost_best));
      tmp_dbl = MAX (tmp_dbl, fabs (cost_best - cost_last));
      tmp_dbl = MAX ((double) EPS_DOUBLE, tmp_dbl);
      *initial_cost_temperature = MIN (*initial_cost_temperature, tmp_dbl);
    }
    /* target cost temperature: bounded below by EPS_DOUBLE and above
       by the initial cost temperature */
    tmp_dbl = (double) *index_cost_acceptances;
    tmp_dbl1 = MAX (fabs (cost_last - cost_best), *current_cost_temperature);
    tmp_dbl1 = MAX ((double) EPS_DOUBLE, tmp_dbl1);
    tmp_dbl1 = MIN (tmp_dbl1, *initial_cost_temperature);
#endif /* USER_REANNEAL_COST */
    /* invert the cost annealing schedule to get the acceptance index
       corresponding to the target temperature tmp_dbl1 */
    if (cost_test == TRUE && (*current_cost_temperature > tmp_dbl1)) {
      tmp_var_db3 =
        fabs (F_LOG (((double) EPS_DOUBLE + *initial_cost_temperature) /
                     (tmp_dbl1)));
      tmp_dbl = (double) EPS_DOUBLE + F_POW (tmp_var_db3
                                             / *temperature_scale_cost,
                                             *xnumber_parameters
#if QUENCH_COST
                                             /
                                             OPTIONS->User_Quench_Cost_Scale
                                             [0]);
#else
        );
#endif
    } else {
      log_init_cur_temp_ratio =
        fabs (F_LOG (((double) EPS_DOUBLE + *initial_cost_temperature) /
                     ((double) EPS_DOUBLE + *current_cost_temperature)));
      tmp_dbl = (double) EPS_DOUBLE
        + F_POW (log_init_cur_temp_ratio
                 / *temperature_scale_cost, *xnumber_parameters
#if QUENCH_COST
                 / OPTIONS->User_Quench_Cost_Scale[0]
#else
#endif
        );
    }
    /* reset index_cost_temperature if index reset too large */
    while (tmp_dbl > ((double) MAXIMUM_REANNEAL_INDEX)) {
      log_new_temperature_ratio = -*temperature_scale_cost * F_POW (tmp_dbl,
#if QUENCH_COST
                                                                    OPTIONS->
                                                                    User_Quench_Cost_Scale
                                                                    [0]
#else
                                                                    ONE
#endif
                                                                    /
                                                                    *xnumber_parameters);
      log_new_temperature_ratio = EXPONENT_CHECK (log_new_temperature_ratio);
      new_temperature =
        *initial_cost_temperature * F_EXP (log_new_temperature_ratio);
      /* shrink the index and compensate via the initial temperature,
         mirroring the parameter-temperature rescaling above */
      tmp_dbl /= (double) REANNEAL_SCALE;
      temperature_rescale_power = ONE / F_POW ((double) REANNEAL_SCALE,
#if QUENCH_COST
                                               OPTIONS->User_Quench_Cost_Scale
                                               [0]
#else
                                               ONE
#endif
                                               / *xnumber_parameters);
      *initial_cost_temperature =
        new_temperature * F_POW (*initial_cost_temperature /
                                 new_temperature, temperature_rescale_power);
    }
    *index_cost_acceptances = (LONG_INT) tmp_dbl;
#if USER_ACCEPTANCE_TEST
    OPTIONS->Cost_Temp_Init = *initial_cost_temperature;
#endif
  }
}
/***********************************************************************
* cost_derivatives
* This procedure calculates the derivatives of the cost function
* with respect to its parameters. The first derivatives are
* used as a sensitivity measure for reannealing. The second
* derivatives are calculated only if *curvature_flag=TRUE;
* these are a measure of the covariance of the fit when a
* minimum is found.
***********************************************************************/
/* Calculate the numerical derivatives of the best
generated state found so far */
/* Assuming no information is given about the metric of the parameter
space, use simple Cartesian space to calculate curvatures. */
#if HAVE_ANSI
void
cost_derivatives (double (*user_cost_function)
(double *, double *, double *, double *, double *,
ALLOC_INT *, int *, int *, int *, USER_DEFINES *),
double *parameter_minimum, double *parameter_maximum,
double *tangents, double *curvature,
double *maximum_tangent, ALLOC_INT * number_parameters,
int *parameter_type, int *exit_status,
int *curvature_flag, int *valid_state_generated_flag,
LONG_INT * number_invalid_generated_states,
STATE * current_generated_state,
STATE * best_generated_state, FILE * ptr_asa_out,
USER_DEFINES * OPTIONS)
#else
void
cost_derivatives (user_cost_function,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
maximum_tangent,
number_parameters,
parameter_type,
exit_status,
curvature_flag,
valid_state_generated_flag,
number_invalid_generated_states,
current_generated_state,
best_generated_state, ptr_asa_out, OPTIONS)
double (*user_cost_function) ();
double *parameter_minimum;
double *parameter_maximum;
double *tangents;
double *curvature;
double *maximum_tangent;
ALLOC_INT *number_parameters;
int *parameter_type;
int *exit_status;
int *curvature_flag;
int *valid_state_generated_flag;
LONG_INT *number_invalid_generated_states;
STATE *current_generated_state;
STATE *best_generated_state;
FILE *ptr_asa_out;
USER_DEFINES *OPTIONS;
#endif
{
ALLOC_INT index_v, index_vv, index_v_vv, index_vv_v;
LONG_INT saved_num_invalid_gen_states;
#if ASA_PRINT
LONG_INT tmp_saved;
#endif
double parameter_v, parameter_vv, parameter_v_offset, parameter_vv_offset;
double recent_best_cost;
double new_cost_state_1, new_cost_state_2, new_cost_state_3;
double delta_parameter_v, delta_parameter_vv;
int immediate_flag;
if (OPTIONS->Curvature_0 == TRUE)
*curvature_flag = FALSE;
if (OPTIONS->Curvature_0 == -1)
*curvature_flag = TRUE;
/* save Immediate_Exit flag */
immediate_flag = OPTIONS->Immediate_Exit;
/* save the best cost */
recent_best_cost = best_generated_state->cost;
/* copy the best state into the current state */
VFOR (index_v) {
/* ignore parameters with too small ranges */
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
current_generated_state->parameter[index_v] =
best_generated_state->parameter[index_v];
}
saved_num_invalid_gen_states = (*number_invalid_generated_states);
/* set parameters (& possibly constraints) to best state */
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) != (current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
if (OPTIONS->User_Tangents == TRUE) {
*valid_state_generated_flag = -1;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
} else {
/* calculate tangents */
VFOR (index_v) {
if (NO_REANNEAL (index_v)) {
tangents[index_v] = ZERO;
continue;
}
/* skip parameters with too small range or integer parameters */
if (OPTIONS->Include_Integer_Parameters == TRUE) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
tangents[index_v] = ZERO;
continue;
}
} else {
if (PARAMETER_RANGE_TOO_SMALL (index_v) ||
INTEGER_PARAMETER (index_v)) {
tangents[index_v] = ZERO;
continue;
}
}
#if DELTA_PARAMETERS
delta_parameter_v = OPTIONS->User_Delta_Parameter[index_v];
#else
delta_parameter_v = OPTIONS->Delta_X;
#endif
if (delta_parameter_v < SMALL_FLOAT) {
tangents[index_v] = 0;
continue;
}
/* save the v_th parameter and delta_parameter */
parameter_v = best_generated_state->parameter[index_v];
parameter_v_offset = (ONE + delta_parameter_v) * parameter_v;
if (parameter_v_offset > parameter_maximum[index_v] ||
parameter_v_offset < parameter_minimum[index_v]) {
delta_parameter_v = -delta_parameter_v;
parameter_v_offset = (ONE + delta_parameter_v) * parameter_v;
}
/* generate the first sample point */
current_generated_state->parameter[index_v] = parameter_v_offset;
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag, exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_1 = current_generated_state->cost;
/* restore the parameter state */
current_generated_state->parameter[index_v] = parameter_v;
/* calculate the numerical derivative */
tangents[index_v] = (new_cost_state_1 - recent_best_cost)
/ (delta_parameter_v * parameter_v + (double) EPS_DOUBLE);
}
}
/* find the maximum |tangent| from all tangents */
*maximum_tangent = 0;
VFOR (index_v) {
if (NO_REANNEAL (index_v))
continue;
/* ignore too small ranges and integer parameters types */
if (OPTIONS->Include_Integer_Parameters == TRUE) {
if (PARAMETER_RANGE_TOO_SMALL (index_v))
continue;
} else {
if (PARAMETER_RANGE_TOO_SMALL (index_v)
|| INTEGER_PARAMETER (index_v))
continue;
}
/* find the maximum |tangent| (from all tangents) */
if (fabs (tangents[index_v]) > *maximum_tangent) {
*maximum_tangent = fabs (tangents[index_v]);
}
}
if (*curvature_flag == TRUE || *curvature_flag == -1) {
/* calculate diagonal curvatures */
VFOR (index_v) {
/* index_v_vv: row index_v, column index_v */
index_v_vv = ROW_COL_INDEX (index_v, index_v);
if (NO_REANNEAL (index_v)) {
curvature[index_v_vv] = ZERO;
continue;
}
/* skip parameters with too small range or integer parameters */
if (OPTIONS->Include_Integer_Parameters == TRUE) {
if (PARAMETER_RANGE_TOO_SMALL (index_v)) {
curvature[index_v_vv] = ZERO;
continue;
}
} else {
if (PARAMETER_RANGE_TOO_SMALL (index_v) ||
INTEGER_PARAMETER (index_v)) {
curvature[index_v_vv] = ZERO;
continue;
}
}
#if DELTA_PARAMETERS
delta_parameter_v = OPTIONS->User_Delta_Parameter[index_v];
#else
delta_parameter_v = OPTIONS->Delta_X;
#endif
if (delta_parameter_v < SMALL_FLOAT) {
curvature[index_v_vv] = ZERO;
continue;
}
/* save the v_th parameter and delta_parameter */
parameter_v = best_generated_state->parameter[index_v];
if (parameter_v + delta_parameter_v * fabs (parameter_v)
> parameter_maximum[index_v]) {
/* generate the first sample point */
current_generated_state->parameter[index_v] =
parameter_v - TWO * delta_parameter_v * fabs (parameter_v);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_1 = current_generated_state->cost;
/* generate the second sample point */
current_generated_state->parameter[index_v] =
parameter_v - delta_parameter_v * fabs (parameter_v);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_2 = current_generated_state->cost;
/* restore the parameter state */
current_generated_state->parameter[index_v] = parameter_v;
/* calculate and store the curvature */
curvature[index_v_vv] =
(recent_best_cost - TWO * new_cost_state_2
+ new_cost_state_1) / (delta_parameter_v * delta_parameter_v
* parameter_v * parameter_v +
(double) EPS_DOUBLE);
} else if (parameter_v - delta_parameter_v * fabs (parameter_v)
< parameter_minimum[index_v]) {
/* generate the first sample point */
current_generated_state->parameter[index_v] =
parameter_v + TWO * delta_parameter_v * fabs (parameter_v);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_1 = current_generated_state->cost;
/* generate the second sample point */
current_generated_state->parameter[index_v] =
parameter_v + delta_parameter_v * fabs (parameter_v);
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_2 = current_generated_state->cost;
/* restore the parameter state */
current_generated_state->parameter[index_v] = parameter_v;
/* index_v_vv: row index_v, column index_v */
index_v_vv = ROW_COL_INDEX (index_v, index_v);
/* calculate and store the curvature */
curvature[index_v_vv] =
(recent_best_cost - TWO * new_cost_state_2
+ new_cost_state_1) / (delta_parameter_v * delta_parameter_v
* parameter_v * parameter_v +
(double) EPS_DOUBLE);
} else {
/* generate the first sample point */
parameter_v_offset = (ONE + delta_parameter_v) * parameter_v;
current_generated_state->parameter[index_v] = parameter_v_offset;
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_1 = current_generated_state->cost;
/* generate the second sample point */
current_generated_state->parameter[index_v] =
(ONE - delta_parameter_v) * parameter_v;
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_2 = current_generated_state->cost;
/* restore the parameter state */
current_generated_state->parameter[index_v] = parameter_v;
/* calculate and store the curvature */
curvature[index_v_vv] =
(new_cost_state_2 - TWO * recent_best_cost
+ new_cost_state_1) / (delta_parameter_v * delta_parameter_v
* parameter_v * parameter_v +
(double) EPS_DOUBLE);
}
}
/* calculate off-diagonal curvatures */
VFOR (index_v) {
#if DELTA_PARAMETERS
delta_parameter_v = OPTIONS->User_Delta_Parameter[index_v];
#else
delta_parameter_v = OPTIONS->Delta_X;
#endif
if (delta_parameter_v < SMALL_FLOAT) {
VFOR (index_vv) {
/* index_v_vv: row index_v, column index_vv */
index_v_vv = ROW_COL_INDEX (index_v, index_vv);
index_vv_v = ROW_COL_INDEX (index_vv, index_v);
curvature[index_vv_v] = curvature[index_v_vv] = ZERO;
}
continue;
}
/* save the v_th parameter and delta_x */
parameter_v = current_generated_state->parameter[index_v];
VFOR (index_vv) {
/* index_v_vv: row index_v, column index_vv */
index_v_vv = ROW_COL_INDEX (index_v, index_vv);
index_vv_v = ROW_COL_INDEX (index_vv, index_v);
if (NO_REANNEAL (index_vv) || NO_REANNEAL (index_v)) {
curvature[index_vv_v] = curvature[index_v_vv] = ZERO;
continue;
}
/* calculate only the upper diagonal */
if (index_v <= index_vv) {
continue;
}
/* skip parms with too small range or integer parameters */
if (OPTIONS->Include_Integer_Parameters == TRUE) {
if (PARAMETER_RANGE_TOO_SMALL (index_v) ||
PARAMETER_RANGE_TOO_SMALL (index_vv)) {
curvature[index_vv_v] = curvature[index_v_vv] = ZERO;
continue;
}
} else {
if (INTEGER_PARAMETER (index_v) ||
INTEGER_PARAMETER (index_vv) ||
PARAMETER_RANGE_TOO_SMALL (index_v) ||
PARAMETER_RANGE_TOO_SMALL (index_vv)) {
curvature[index_vv_v] = curvature[index_v_vv] = ZERO;
continue;
}
}
#if DELTA_PARAMETERS
delta_parameter_vv = OPTIONS->User_Delta_Parameter[index_vv];
#else
delta_parameter_vv = OPTIONS->Delta_X;
#endif
if (delta_parameter_vv < SMALL_FLOAT) {
curvature[index_vv_v] = curvature[index_v_vv] = ZERO;
continue;
}
/* save the vv_th parameter and delta_parameter */
parameter_vv = current_generated_state->parameter[index_vv];
/* generate first sample point */
parameter_v_offset = current_generated_state->parameter[index_v] =
(ONE + delta_parameter_v) * parameter_v;
parameter_vv_offset = current_generated_state->parameter[index_vv] =
(ONE + delta_parameter_vv) * parameter_vv;
if (parameter_v_offset > parameter_maximum[index_v] ||
parameter_v_offset < parameter_minimum[index_v]) {
delta_parameter_v = -delta_parameter_v;
current_generated_state->parameter[index_v] =
(ONE + delta_parameter_v) * parameter_v;
}
if (parameter_vv_offset > parameter_maximum[index_vv] ||
parameter_vv_offset < parameter_minimum[index_vv]) {
delta_parameter_vv = -delta_parameter_vv;
current_generated_state->parameter[index_vv] =
(ONE + delta_parameter_vv) * parameter_vv;
}
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_1 = current_generated_state->cost;
/* restore the v_th parameter */
current_generated_state->parameter[index_v] = parameter_v;
/* generate second sample point */
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_2 = current_generated_state->cost;
/* restore the vv_th parameter */
current_generated_state->parameter[index_vv] = parameter_vv;
/* generate third sample point */
current_generated_state->parameter[index_v] =
(ONE + delta_parameter_v) * parameter_v;
*valid_state_generated_flag = TRUE;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
current_generated_state->cost =
user_cost_function (current_generated_state->parameter,
parameter_minimum,
parameter_maximum,
tangents,
curvature,
number_parameters,
parameter_type,
valid_state_generated_flag,
exit_status, OPTIONS);
if ((*valid_state_generated_flag == FALSE)
|| ((current_generated_state->cost) !=
(current_generated_state->cost))
|| current_generated_state->cost < -MAX_DOUBLE
|| current_generated_state->cost > MAX_DOUBLE) {
*exit_status = INVALID_COST_FUNCTION_DERIV;
return;
}
if (*valid_state_generated_flag == FALSE)
++(*number_invalid_generated_states);
new_cost_state_3 = current_generated_state->cost;
/* restore the v_th parameter */
current_generated_state->parameter[index_v] = parameter_v;
/* calculate and store the curvature */
curvature[index_vv_v] = curvature[index_v_vv] =
(new_cost_state_1 - new_cost_state_2
- new_cost_state_3 + recent_best_cost)
/ (delta_parameter_v * delta_parameter_vv
* parameter_v * parameter_vv + (double) EPS_DOUBLE);
}
}
}
/* restore Immediate_Exit flag */
OPTIONS->Immediate_Exit = immediate_flag;
/* restore the best cost function value */
current_generated_state->cost = recent_best_cost;
#if ASA_PRINT
tmp_saved = *number_invalid_generated_states - saved_num_invalid_gen_states;
if (tmp_saved > 0)
#if INT_LONG
fprintf (ptr_asa_out,
"Generated %ld invalid states when calculating the derivatives\n",
tmp_saved);
#else
fprintf (ptr_asa_out,
"Generated %d invalid states when calculating the derivatives\n",
tmp_saved);
#endif
#endif /* ASA_PRINT */
*number_invalid_generated_states = saved_num_invalid_gen_states;
#if USER_ACCEPTANCE_TEST
OPTIONS->User_Acceptance_Flag = TRUE;
OPTIONS->Cost_Acceptance_Flag = FALSE;
#endif
}
/***********************************************************************
* asa_test_asa_options
* Tests user's selected options
***********************************************************************/
/*
 * asa_test_asa_options: validates the user's pointer arguments, the
 * compile-time DEFINE_OPTIONS macros and the run-time OPTIONS settings.
 * Each problem found is reported through print_string()/print_string_index()
 * (using the global exit_msg buffer) and counted.
 *
 * Returns: the number of invalid settings detected (0 if all are valid).
 *
 * NOTE(review): the NULL-pointer checks now run first and the function
 * returns early if any required pointer is NULL, since every later check
 * dereferences one of them (previously OPTIONS->Curvature_0 was read
 * before OPTIONS itself was checked).
 */
#if HAVE_ANSI
int
asa_test_asa_options (LONG_INT * seed,
                      double *parameter_initial_final,
                      double *parameter_minimum,
                      double *parameter_maximum,
                      double *tangents,
                      double *curvature,
                      ALLOC_INT * number_parameters,
                      int *parameter_type,
                      int *valid_state_generated_flag,
                      int *exit_status,
                      FILE * ptr_asa_out, USER_DEFINES * OPTIONS)
#else
int
asa_test_asa_options (seed,
                      parameter_initial_final,
                      parameter_minimum,
                      parameter_maximum,
                      tangents,
                      curvature,
                      number_parameters,
                      parameter_type,
                      valid_state_generated_flag,
                      exit_status, ptr_asa_out, OPTIONS)
LONG_INT *seed;
double *parameter_initial_final;
double *parameter_minimum;
double *parameter_maximum;
double *tangents;
double *curvature;
ALLOC_INT *number_parameters;
int *parameter_type;
int *valid_state_generated_flag;
int *exit_status;
FILE *ptr_asa_out;
USER_DEFINES *OPTIONS;
#endif /* HAVE_ANSI */
{
  int invalid, index_v;
  invalid = 0;
  /* required pointers: everything below dereferences one of these */
  if (seed == NULL) {
    strcpy (exit_msg, "*** seed == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (parameter_initial_final == NULL) {
    strcpy (exit_msg, "*** parameter_initial_final == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (parameter_minimum == NULL) {
    strcpy (exit_msg, "*** parameter_minimum == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (parameter_maximum == NULL) {
    strcpy (exit_msg, "*** parameter_maximum == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (tangents == NULL) {
    strcpy (exit_msg, "*** tangents == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  /* OPTIONS must be checked before its first dereference below */
  if (OPTIONS == NULL) {
    strcpy (exit_msg, "*** OPTIONS == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  } else if ((OPTIONS->Curvature_0 == FALSE) || (OPTIONS->Curvature_0 == -1)) {
    /* curvature array is only required when curvatures are computed */
    if (curvature == NULL) {
      strcpy (exit_msg, "*** curvature == NULL ***");
      print_string (ptr_asa_out, exit_msg);
      ++invalid;
    }
  }
  if (number_parameters == NULL) {
    strcpy (exit_msg, "*** number_parameters == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (parameter_type == NULL) {
    strcpy (exit_msg, "*** parameter_type == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (valid_state_generated_flag == NULL) {
    strcpy (exit_msg, "*** valid_state_generated_flag == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (exit_status == NULL) {
    strcpy (exit_msg, "*** exit_status == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  /* cannot safely dereference missing pointers in the checks below */
  if (invalid > 0)
    return (invalid);
  VFOR (index_v) if (parameter_minimum[index_v] > parameter_maximum[index_v]) {
    strcpy (exit_msg, "*** parameter_minimum[] > parameter_maximum[] ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
  VFOR (index_v)
    if (parameter_initial_final[index_v] < parameter_minimum[index_v]) {
    if (PARAMETER_RANGE_TOO_SMALL (index_v))
      continue;
    strcpy (exit_msg, "*** parameter_initial[] < parameter_minimum[] ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
  VFOR (index_v)
    if (parameter_initial_final[index_v] > parameter_maximum[index_v]) {
    if (PARAMETER_RANGE_TOO_SMALL (index_v))
      continue;
    strcpy (exit_msg, "*** parameter_initial[] > parameter_maximum[] ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
  if (*number_parameters < 1) {
    strcpy (exit_msg, "*** *number_parameters < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  VFOR (index_v)
    if (parameter_type[index_v] != -2 && parameter_type[index_v] != 2
        && parameter_type[index_v] != -1 && parameter_type[index_v] != 1) {
    strcpy (exit_msg,
            "*** parameter_type[] != -2 && parameter_type[] != 2 && parameter_type[] != -1 && parameter_type[] != 1 ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
  /* compile-time DEFINE_OPTIONS must be proper booleans or sane values */
  if (OPTIONS_FILE != FALSE && OPTIONS_FILE != TRUE) {
    strcpy (exit_msg,
            "*** OPTIONS_FILE != FALSE && OPTIONS_FILE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS_FILE_DATA != FALSE && OPTIONS_FILE_DATA != TRUE) {
    strcpy (exit_msg,
            "*** OPTIONS_FILE_DATA != FALSE && OPTIONS_FILE_DATA != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (RECUR_OPTIONS_FILE != FALSE && RECUR_OPTIONS_FILE != TRUE) {
    strcpy (exit_msg,
            "*** RECUR_OPTIONS_FILE != FALSE && RECUR_OPTIONS_FILE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (RECUR_OPTIONS_FILE_DATA != FALSE && RECUR_OPTIONS_FILE_DATA != TRUE) {
    strcpy (exit_msg,
            "*** RECUR_OPTIONS_FILE_DATA != FALSE && RECUR_OPTIONS_FILE_DATA != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (COST_FILE != FALSE && COST_FILE != TRUE) {
    strcpy (exit_msg, "*** COST_FILE != FALSE && COST_FILE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_LIB != FALSE && ASA_LIB != TRUE) {
    strcpy (exit_msg, "*** ASA_LIB != FALSE && ASA_LIB != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (MY_TEMPLATE != FALSE && MY_TEMPLATE != TRUE) {
    strcpy (exit_msg, "*** MY_TEMPLATE != FALSE && MY_TEMPLATE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_LIB != FALSE && ASA_TEMPLATE_LIB != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_LIB != FALSE && ASA_TEMPLATE_LIB != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (HAVE_ANSI != FALSE && HAVE_ANSI != TRUE) {
    strcpy (exit_msg, "*** HAVE_ANSI != FALSE && HAVE_ANSI != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (IO_PROTOTYPES != FALSE && IO_PROTOTYPES != TRUE) {
    strcpy (exit_msg,
            "*** IO_PROTOTYPES != FALSE && IO_PROTOTYPES != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (TIME_CALC != FALSE && TIME_CALC != TRUE) {
    strcpy (exit_msg, "*** TIME_CALC != FALSE && TIME_CALC != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (TIME_STD != FALSE && TIME_STD != TRUE) {
    strcpy (exit_msg, "*** TIME_STD != FALSE && TIME_STD != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (TIME_GETRUSAGE != FALSE && TIME_GETRUSAGE != TRUE) {
    strcpy (exit_msg,
            "*** TIME_GETRUSAGE != FALSE && TIME_GETRUSAGE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (INT_LONG != FALSE && INT_LONG != TRUE) {
    strcpy (exit_msg, "*** INT_LONG != FALSE && INT_LONG != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (INT_ALLOC != FALSE && INT_ALLOC != TRUE) {
    strcpy (exit_msg, "*** INT_ALLOC != FALSE && INT_ALLOC != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (SMALL_FLOAT < ZERO) {
    strcpy (exit_msg, "*** SMALL_FLOAT < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (MIN_DOUBLE < ZERO) {
    strcpy (exit_msg, "*** MIN_DOUBLE < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (MAX_DOUBLE < ZERO) {
    strcpy (exit_msg, "*** MAX_DOUBLE < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (EPS_DOUBLE < ZERO) {
    strcpy (exit_msg, "*** EPS_DOUBLE < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (CHECK_EXPONENT != FALSE && CHECK_EXPONENT != TRUE) {
    strcpy (exit_msg,
            "*** CHECK_EXPONENT != FALSE && CHECK_EXPONENT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (NO_PARAM_TEMP_TEST != FALSE && NO_PARAM_TEMP_TEST != TRUE) {
    strcpy (exit_msg,
            "*** NO_PARAM_TEMP_TEST != FALSE && NO_PARAM_TEMP_TEST != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (NO_COST_TEMP_TEST != FALSE && NO_COST_TEMP_TEST != TRUE) {
    strcpy (exit_msg,
            "*** NO_COST_TEMP_TEST != FALSE && NO_COST_TEMP_TEST != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (SELF_OPTIMIZE != FALSE && SELF_OPTIMIZE != TRUE) {
    strcpy (exit_msg,
            "*** SELF_OPTIMIZE != FALSE && SELF_OPTIMIZE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEST != FALSE && ASA_TEST != TRUE) {
    strcpy (exit_msg, "*** ASA_TEST != FALSE && ASA_TEST != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEST_POINT != FALSE && ASA_TEST_POINT != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEST_POINT != FALSE && ASA_TEST_POINT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_EXIT_ANYTIME != FALSE && ASA_EXIT_ANYTIME != TRUE) {
    strcpy (exit_msg,
            "*** ASA_EXIT_ANYTIME != FALSE && ASA_EXIT_ANYTIME != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE != FALSE) {
    strcpy (exit_msg, "*** ASA_TEMPLATE != FALSE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_ASA_OUT_PID != FALSE && ASA_TEMPLATE_ASA_OUT_PID != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_ASA_OUT_PID != FALSE && ASA_TEMPLATE_ASA_OUT_PID != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_MULTIPLE != FALSE && ASA_TEMPLATE_MULTIPLE != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_MULTIPLE != FALSE && ASA_TEMPLATE_MULTIPLE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_SELFOPT != FALSE && ASA_TEMPLATE_SELFOPT != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_SELFOPT != FALSE && ASA_TEMPLATE_SELFOPT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_SAMPLE != FALSE && ASA_TEMPLATE_SAMPLE != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_SAMPLE != FALSE && ASA_TEMPLATE_SAMPLE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_QUEUE != FALSE && ASA_TEMPLATE_QUEUE != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_QUEUE != FALSE && ASA_TEMPLATE_QUEUE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_PARALLEL != FALSE && ASA_TEMPLATE_PARALLEL != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_PARALLEL != FALSE && ASA_TEMPLATE_PARALLEL != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_TEMPLATE_SAVE != FALSE && ASA_TEMPLATE_SAVE != TRUE) {
    strcpy (exit_msg,
            "*** ASA_TEMPLATE_SAVE != FALSE && ASA_TEMPLATE_SAVE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_INITIAL_COST_TEMP != FALSE && USER_INITIAL_COST_TEMP != TRUE) {
    strcpy (exit_msg,
            "*** USER_INITIAL_COST_TEMP != FALSE && USER_INITIAL_COST_TEMP != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (RATIO_TEMPERATURE_SCALES != FALSE && RATIO_TEMPERATURE_SCALES != TRUE) {
    strcpy (exit_msg,
            "*** RATIO_TEMPERATURE_SCALES != FALSE && RATIO_TEMPERATURE_SCALES != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_INITIAL_PARAMETERS_TEMPS != FALSE
      && USER_INITIAL_PARAMETERS_TEMPS != TRUE) {
    strcpy (exit_msg,
            "*** USER_INITIAL_PARAMETERS_TEMPS != FALSE && USER_INITIAL_PARAMETERS_TEMPS != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (DELTA_PARAMETERS != FALSE && DELTA_PARAMETERS != TRUE) {
    strcpy (exit_msg,
            "*** DELTA_PARAMETERS != FALSE && DELTA_PARAMETERS != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (QUENCH_PARAMETERS != FALSE && QUENCH_PARAMETERS != TRUE) {
    strcpy (exit_msg,
            "*** QUENCH_PARAMETERS != FALSE && QUENCH_PARAMETERS != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (QUENCH_COST != FALSE && QUENCH_COST != TRUE) {
    strcpy (exit_msg, "*** QUENCH_COST != FALSE && QUENCH_COST != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (QUENCH_PARAMETERS_SCALE != FALSE && QUENCH_PARAMETERS_SCALE != TRUE) {
    strcpy (exit_msg,
            "*** QUENCH_PARAMETERS_SCALE != FALSE && QUENCH_PARAMETERS_SCALE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (QUENCH_COST_SCALE != FALSE && QUENCH_COST_SCALE != TRUE) {
    strcpy (exit_msg,
            "*** QUENCH_COST_SCALE != FALSE && QUENCH_COST_SCALE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONAL_DATA_DBL != FALSE && OPTIONAL_DATA_DBL != TRUE) {
    strcpy (exit_msg,
            "*** OPTIONAL_DATA_DBL != FALSE && OPTIONAL_DATA_DBL != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONAL_DATA_INT != FALSE && OPTIONAL_DATA_INT != TRUE) {
    strcpy (exit_msg,
            "*** OPTIONAL_DATA_INT != FALSE && OPTIONAL_DATA_INT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONAL_DATA_PTR != FALSE && OPTIONAL_DATA_PTR != TRUE) {
    strcpy (exit_msg,
            "*** OPTIONAL_DATA_PTR != FALSE && OPTIONAL_DATA_PTR != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_COST_SCHEDULE != FALSE && USER_COST_SCHEDULE != TRUE) {
    strcpy (exit_msg,
            "*** USER_COST_SCHEDULE != FALSE && USER_COST_SCHEDULE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_ACCEPT_ASYMP_EXP != FALSE && USER_ACCEPT_ASYMP_EXP != TRUE) {
    strcpy (exit_msg,
            "*** USER_ACCEPT_ASYMP_EXP != FALSE && USER_ACCEPT_ASYMP_EXP != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_ACCEPT_THRESHOLD != FALSE && USER_ACCEPT_THRESHOLD != TRUE) {
    strcpy (exit_msg,
            "*** USER_ACCEPT_THRESHOLD != FALSE && USER_ACCEPT_THRESHOLD != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_ACCEPTANCE_TEST != FALSE && USER_ACCEPTANCE_TEST != TRUE) {
    strcpy (exit_msg,
            "*** USER_ACCEPTANCE_TEST != FALSE && USER_ACCEPTANCE_TEST != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_GENERATING_FUNCTION != FALSE && USER_GENERATING_FUNCTION != TRUE) {
    strcpy (exit_msg,
            "*** USER_GENERATING_FUNCTION != FALSE && USER_GENERATING_FUNCTION != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_REANNEAL_COST != FALSE && USER_REANNEAL_COST != TRUE) {
    strcpy (exit_msg,
            "*** USER_REANNEAL_COST != FALSE && USER_REANNEAL_COST != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_REANNEAL_PARAMETERS != FALSE && USER_REANNEAL_PARAMETERS != TRUE) {
    strcpy (exit_msg,
            "*** USER_REANNEAL_PARAMETERS != FALSE && USER_REANNEAL_PARAMETERS != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (MAXIMUM_REANNEAL_INDEX < 1) {
    strcpy (exit_msg, "*** MAXIMUM_REANNEAL_INDEX < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (REANNEAL_SCALE < ZERO) {
    strcpy (exit_msg, "*** REANNEAL_SCALE < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_SAMPLE != FALSE && ASA_SAMPLE != TRUE) {
    strcpy (exit_msg, "*** ASA_SAMPLE != FALSE && ASA_SAMPLE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ADAPTIVE_OPTIONS != FALSE && ADAPTIVE_OPTIONS != TRUE) {
    strcpy (exit_msg,
            "*** ADAPTIVE_OPTIONS != FALSE && ADAPTIVE_OPTIONS != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_QUEUE != FALSE && ASA_QUEUE != TRUE) {
    strcpy (exit_msg, "*** ASA_QUEUE != FALSE && ASA_QUEUE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_RESOLUTION != FALSE && ASA_RESOLUTION != TRUE) {
    strcpy (exit_msg,
            "*** ASA_RESOLUTION != FALSE && ASA_RESOLUTION != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_FUZZY != FALSE && ASA_FUZZY != TRUE) {
    strcpy (exit_msg, "*** ASA_FUZZY != FALSE && ASA_FUZZY != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_FUZZY_PRINT != FALSE && ASA_FUZZY_PRINT != TRUE) {
    strcpy (exit_msg,
            "*** ASA_FUZZY_PRINT != FALSE && ASA_FUZZY_PRINT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (FITLOC != FALSE && FITLOC != TRUE) {
    strcpy (exit_msg, "*** FITLOC != FALSE && FITLOC != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (FITLOC_ROUND != FALSE && FITLOC_ROUND != TRUE) {
    strcpy (exit_msg,
            "*** FITLOC_ROUND != FALSE && FITLOC_ROUND != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (FITLOC_PRINT != FALSE && FITLOC_PRINT != TRUE) {
    strcpy (exit_msg,
            "*** FITLOC_PRINT != FALSE && FITLOC_PRINT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (MULTI_MIN != FALSE && MULTI_MIN != TRUE) {
    strcpy (exit_msg, "*** MULTI_MIN != FALSE && MULTI_MIN != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#if MULTI_MIN
  if (OPTIONS->Multi_Number <= 0) {
    strcpy (exit_msg, "*** OPTIONS->Multi_Number <= 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  VFOR (index_v) {
    /* (x != x) catches NaN grid entries */
    if (((OPTIONS->Multi_Grid[index_v]) != (OPTIONS->Multi_Grid[index_v]))
        || OPTIONS->Multi_Grid[index_v] < 0) {
      strcpy (exit_msg,
              "*** (OPTIONS->Multi_Grid[]) != (OPTIONS->Multi_Grid[]) || OPTIONS->Multi_Grid[] < 0 ***");
      print_string_index (ptr_asa_out, exit_msg, index_v);
      ++invalid;
    }
  }
  if (OPTIONS->Multi_Specify != 0 && OPTIONS->Multi_Specify != 1) {
    strcpy (exit_msg,
            "*** OPTIONS->Multi_Specify != 0 && OPTIONS->Multi_Specify != 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
  if (ASA_PARALLEL != FALSE && ASA_PARALLEL != TRUE) {
    strcpy (exit_msg,
            "*** ASA_PARALLEL != FALSE && ASA_PARALLEL != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_SAVE != FALSE && ASA_SAVE != TRUE) {
    strcpy (exit_msg, "*** ASA_SAVE != FALSE && ASA_SAVE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_SAVE_OPT != FALSE && ASA_SAVE_OPT != TRUE) {
    strcpy (exit_msg,
            "*** ASA_SAVE_OPT != FALSE && ASA_SAVE_OPT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_SAVE_BACKUP != FALSE && ASA_SAVE_BACKUP != TRUE) {
    strcpy (exit_msg,
            "*** ASA_SAVE_BACKUP != FALSE && ASA_SAVE_BACKUP != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_PIPE != FALSE && ASA_PIPE != TRUE) {
    strcpy (exit_msg, "*** ASA_PIPE != FALSE && ASA_PIPE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_PIPE_FILE != FALSE && ASA_PIPE_FILE != TRUE) {
    strcpy (exit_msg,
            "*** ASA_PIPE_FILE != FALSE && ASA_PIPE_FILE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (SYSTEM_CALL != FALSE && SYSTEM_CALL != TRUE) {
    strcpy (exit_msg, "*** SYSTEM_CALL != FALSE && SYSTEM_CALL != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (FDLIBM_POW != FALSE && FDLIBM_POW != TRUE) {
    strcpy (exit_msg, "*** FDLIBM_POW != FALSE && FDLIBM_POW != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (FDLIBM_LOG != FALSE && FDLIBM_LOG != TRUE) {
    strcpy (exit_msg, "*** FDLIBM_LOG != FALSE && FDLIBM_LOG != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (FDLIBM_EXP != FALSE && FDLIBM_EXP != TRUE) {
    strcpy (exit_msg, "*** FDLIBM_EXP != FALSE && FDLIBM_EXP != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_PRINT != FALSE && ASA_PRINT != TRUE) {
    strcpy (exit_msg, "*** ASA_PRINT != FALSE && ASA_PRINT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_ASA_OUT != FALSE && USER_ASA_OUT != TRUE) {
    strcpy (exit_msg,
            "*** USER_ASA_OUT != FALSE && USER_ASA_OUT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (USER_ASA_USR_OUT != FALSE && USER_ASA_USR_OUT != TRUE) {
    strcpy (exit_msg,
            "*** USER_ASA_USR_OUT != FALSE && USER_ASA_USR_OUT != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_PRINT_INTERMED != FALSE && ASA_PRINT_INTERMED != TRUE) {
    strcpy (exit_msg,
            "*** ASA_PRINT_INTERMED != FALSE && ASA_PRINT_INTERMED != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (ASA_PRINT_MORE != FALSE && ASA_PRINT_MORE != TRUE) {
    strcpy (exit_msg,
            "*** ASA_PRINT_MORE != FALSE && ASA_PRINT_MORE != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (G_FIELD < 0) {
    strcpy (exit_msg, "*** G_FIELD < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (G_PRECISION < 0) {
    strcpy (exit_msg, "*** G_PRECISION < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  /* run-time OPTIONS settings */
  if (OPTIONS->Limit_Acceptances < 0) {
    strcpy (exit_msg, "*** Limit_Acceptances < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Limit_Generated < 0) {
    strcpy (exit_msg, "*** Limit_Generated < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Limit_Invalid_Generated_States < 0) {
    strcpy (exit_msg, "*** Limit_Invalid_Generated_States < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Accepted_To_Generated_Ratio <= ZERO) {
    strcpy (exit_msg, "*** Accepted_To_Generated_Ratio <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Cost_Precision <= ZERO) {
    strcpy (exit_msg, "*** Cost_Precision <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Maximum_Cost_Repeat < 0) {
    strcpy (exit_msg, "*** Maximum_Cost_Repeat < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Number_Cost_Samples == 0 || OPTIONS->Number_Cost_Samples == -1) {
    strcpy (exit_msg,
            "*** Number_Cost_Samples == 0 || Number_Cost_Samples == -1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Temperature_Ratio_Scale <= ZERO) {
    strcpy (exit_msg, "*** Temperature_Ratio_Scale <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Cost_Parameter_Scale_Ratio <= ZERO) {
    strcpy (exit_msg, "*** Cost_Parameter_Scale_Ratio <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Temperature_Anneal_Scale <= ZERO) {
    strcpy (exit_msg, "*** Temperature_Anneal_Scale <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#if USER_INITIAL_COST_TEMP
  if (OPTIONS->User_Cost_Temperature[0] <= ZERO) {
    strcpy (exit_msg, "*** User_Cost_Temperature[0] <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
  if (OPTIONS->Include_Integer_Parameters != FALSE
      && OPTIONS->Include_Integer_Parameters != TRUE) {
    /* previously reported with an empty message */
    strcpy (exit_msg,
            "*** Include_Integer_Parameters != FALSE && Include_Integer_Parameters != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->User_Initial_Parameters != FALSE
      && OPTIONS->User_Initial_Parameters != TRUE) {
    strcpy (exit_msg,
            "*** User_Initial_Parameters != FALSE && User_Initial_Parameters != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Sequential_Parameters >= *number_parameters) {
    strcpy (exit_msg, "*** Sequential_Parameters >= *number_parameters ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Initial_Parameter_Temperature <= ZERO) {
    strcpy (exit_msg, "*** Initial_Parameter_Temperature <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#if RATIO_TEMPERATURE_SCALES
  VFOR (index_v) if (OPTIONS->User_Temperature_Ratio[index_v] <= ZERO) {
    strcpy (exit_msg, "*** User_Temperature_Ratio[] <= ZERO ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
#endif
#if USER_INITIAL_PARAMETERS_TEMPS
  VFOR (index_v) if (OPTIONS->User_Parameter_Temperature[index_v] <= ZERO) {
    strcpy (exit_msg, "*** User_Parameter_Temperature[] <= ZERO ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
#endif
  if (OPTIONS->Acceptance_Frequency_Modulus < 0) {
    strcpy (exit_msg, "*** Acceptance_Frequency_Modulus < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Generated_Frequency_Modulus < 0) {
    strcpy (exit_msg, "*** Generated_Frequency_Modulus < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Reanneal_Cost == -1) {
    strcpy (exit_msg, "*** Reanneal_Cost == -1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Reanneal_Parameters != FALSE
      && OPTIONS->Reanneal_Parameters != TRUE) {
    strcpy (exit_msg,
            "*** Reanneal_Parameters != FALSE && Reanneal_Parameters != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Delta_X < ZERO) {
    strcpy (exit_msg, "*** Delta_X < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#if DELTA_PARAMETERS
  VFOR (index_v) if (OPTIONS->User_Delta_Parameter[index_v] < ZERO) {
    strcpy (exit_msg, "*** User_Delta_Parameter[] < ZERO ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
#endif
  if (OPTIONS->User_Tangents != FALSE && OPTIONS->User_Tangents != TRUE) {
    strcpy (exit_msg,
            "*** User_Tangents != FALSE && User_Tangents != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Curvature_0 != -1 && OPTIONS->Curvature_0 != FALSE
      && OPTIONS->Curvature_0 != TRUE) {
    /* message previously read "Curvature_0 -1" (missing "!=") */
    strcpy (exit_msg,
            "*** Curvature_0 != -1 && Curvature_0 != FALSE && Curvature_0 != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#if QUENCH_PARAMETERS
  VFOR (index_v) if (OPTIONS->User_Quench_Param_Scale[index_v] <= ZERO) {
    strcpy (exit_msg, "*** User_Quench_Param_Scale[] <= ZERO ***");
    print_string_index (ptr_asa_out, exit_msg, index_v);
    ++invalid;
  }
#endif
#if QUENCH_COST
  if (OPTIONS->User_Quench_Cost_Scale[0] <= ZERO) {
    strcpy (exit_msg, "*** User_Quench_Cost_Scale[0] <= ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if OPTIONAL_DATA_DBL
  if (OPTIONS->Asa_Data_Dim_Dbl < 1) {
    strcpy (exit_msg, "*** Asa_Data_Dim_Dbl < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Asa_Data_Dbl == NULL) {
    strcpy (exit_msg, "*** Asa_Data_Dbl == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if ASA_SAVE
  if (OPTIONS->Random_Array_Dim < 1) {
    strcpy (exit_msg, "*** Random_Array_Dim < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Random_Array == NULL) {
    strcpy (exit_msg, "*** Random_Array == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if OPTIONAL_DATA_INT
  if (OPTIONS->Asa_Data_Dim_Int < 1) {
    strcpy (exit_msg, "*** Asa_Data_Dim_Int < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Asa_Data_Int == NULL) {
    strcpy (exit_msg, "*** Asa_Data_Int == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if OPTIONAL_DATA_PTR
  if (OPTIONS->Asa_Data_Dim_Ptr < 1) {
    strcpy (exit_msg, "*** Asa_Data_Dim_Ptr < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Asa_Data_Ptr == NULL) {
    strcpy (exit_msg, "*** Asa_Data_Ptr == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if USER_ASA_OUT
  if (OPTIONS->Asa_Out_File == NULL) {
    strcpy (exit_msg, "*** Asa_Out_File == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if USER_COST_SCHEDULE
  if (OPTIONS->Cost_Schedule == NULL) {
    strcpy (exit_msg, "*** Cost_Schedule == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if USER_ACCEPTANCE_TEST
  if (OPTIONS->Acceptance_Test == NULL) {
    strcpy (exit_msg, "*** Acceptance_Test == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->User_Acceptance_Flag != FALSE
      && OPTIONS->User_Acceptance_Flag != TRUE) {
    strcpy (exit_msg,
            "*** User_Acceptance_Flag != FALSE && User_Acceptance_Flag != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Cost_Acceptance_Flag != FALSE
      && OPTIONS->Cost_Acceptance_Flag != TRUE) {
    strcpy (exit_msg,
            "*** Cost_Acceptance_Flag != FALSE && Cost_Acceptance_Flag != TRUE ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if USER_GENERATING_FUNCTION
  if (OPTIONS->Generating_Distrib == NULL) {
    strcpy (exit_msg, "*** Generating_Distrib == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if USER_REANNEAL_COST
  if (OPTIONS->Reanneal_Cost_Function == NULL) {
    strcpy (exit_msg, "*** Reanneal_Cost_Function == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if USER_REANNEAL_PARAMETERS
  if (OPTIONS->Reanneal_Params_Function == NULL) {
    strcpy (exit_msg, "*** Reanneal_Params_Function == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if ASA_SAMPLE
  if (OPTIONS->Bias_Generated == NULL) {
    strcpy (exit_msg, "*** Bias_Generated == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Limit_Weights < ZERO) {
    strcpy (exit_msg, "*** Limit_Weights < ZERO ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if ASA_QUEUE
  if (OPTIONS->Queue_Size < 0) {
    strcpy (exit_msg, "*** Queue_Size < 0 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Queue_Size > 0) {
    /* a resolution array is required whenever the queue is in use */
    if (OPTIONS->Queue_Resolution == NULL) {
      strcpy (exit_msg, "*** Queue_Resolution == NULL ***");
      print_string (ptr_asa_out, exit_msg);
      ++invalid;
    }
  }
#endif /* ASA_QUEUE */
#if ASA_RESOLUTION
  if (OPTIONS->Coarse_Resolution == NULL) {
    strcpy (exit_msg, "*** Coarse_Resolution == NULL ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif
#if ASA_PARALLEL
  if (OPTIONS->Gener_Block < 1) {
    strcpy (exit_msg, "*** Gener_Block < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Gener_Block_Max < 1) {
    strcpy (exit_msg, "*** Gener_Block_Max < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
  if (OPTIONS->Gener_Mov_Avr < 1) {
    strcpy (exit_msg, "*** Gener_Mov_Avr < 1 ***");
    print_string (ptr_asa_out, exit_msg);
    ++invalid;
  }
#endif /* ASA_PARALLEL */
  return (invalid);
}
/***********************************************************************
* cost_function_test
* Tests user's returned cost function values and parameters
***********************************************************************/
/*
 * cost_function_test: checks a user-returned cost value and the current
 * parameter vector.  Also writes the effective parameter count (total
 * minus parameters whose min..max range is too small to anneal) into
 * *xnumber_parameters.
 *
 * Returns: 1 if the cost is finite (not NaN, within +/- MAX_DOUBLE) and
 * every annealed parameter lies inside its [min, max] bounds; 0 otherwise.
 */
#if HAVE_ANSI
int
cost_function_test (double cost,
                    double *parameter,
                    double *parameter_minimum,
                    double *parameter_maximum,
                    ALLOC_INT * number_parameters, double *xnumber_parameters)
#else
int
cost_function_test (cost,
                    parameter,
                    parameter_minimum, parameter_maximum,
                    number_parameters, xnumber_parameters)
double cost;
double *parameter;
double *parameter_minimum;
double *parameter_maximum;
ALLOC_INT *number_parameters;
double *xnumber_parameters;
#endif /* HAVE_ANSI */
{
  ALLOC_INT index_v;
  int cost_ok = 1;
  /* (cost != cost) is the portable NaN test */
  if ((cost != cost) || cost < -MAX_DOUBLE || cost > MAX_DOUBLE)
    cost_ok = 0;
  /* effective dimension: skip parameters with degenerate ranges */
  *xnumber_parameters = (double) *number_parameters;
  VFOR (index_v) {
    if (PARAMETER_RANGE_TOO_SMALL (index_v))
      *xnumber_parameters -= 1.0;
    else if (parameter[index_v] < parameter_minimum[index_v]
             || parameter[index_v] > parameter_maximum[index_v])
      cost_ok = 0;
  }
  return (cost_ok);
}
/***********************************************************************
* print_string
* This prints the designated string
***********************************************************************/
#if HAVE_ANSI
void
print_string (FILE * ptr_asa_out, char *string)
#else
void
print_string (ptr_asa_out, string)
FILE *ptr_asa_out;
char *string;
#endif /* HAVE_ANSI */
{
  /* echo to stdout when compiled with INCL_STDOUT */
#if INCL_STDOUT
  printf ("\n\n%s\n\n", string);
  fflush (stdout);
#endif /* INCL_STDOUT */
  /* and to the asa output stream when compiled with ASA_PRINT */
#if ASA_PRINT
  fprintf (ptr_asa_out, "\n\n%s\n\n", string);
  fflush (ptr_asa_out);
#endif /* ASA_PRINT */
}
/***********************************************************************
* print_string_index
* This prints the designated string and index
***********************************************************************/
#if HAVE_ANSI
void
print_string_index (FILE * ptr_asa_out, char *string, ALLOC_INT index)
#else
void
print_string_index (ptr_asa_out, string, index)
FILE *ptr_asa_out;
char *string;
ALLOC_INT index;
#endif /* HAVE_ANSI */
{
  /* ALLOC_INT is an int when INT_ALLOC is set; otherwise it is a LONG_INT,
   * which is long when INT_LONG is set and a plain int otherwise.  Each
   * format specifier below must match the actual type. */
#if INCL_STDOUT
#if INT_ALLOC
  printf ("\n\n%s index = %d\n\n", string, index);
#else /* INT_ALLOC */
#if INT_LONG
  printf ("\n\n%s index = %ld\n\n", string, index);
#else /* INT_LONG */
  /* FIX: this branch previously used "%ld" although index is a plain int
   * in this configuration -- a mismatched specifier is undefined behavior.
   * The ASA_PRINT section below already used "%d" here. */
  printf ("\n\n%s index = %d\n\n", string, index);
#endif /* INT_LONG */
#endif /* INT_ALLOC */
  fflush (stdout);
#endif /* INCL_STDOUT */
#if ASA_PRINT
#if INT_ALLOC
  fprintf (ptr_asa_out, "\n\n%s index = %d\n\n", string, index);
#else /* INT_ALLOC */
#if INT_LONG
  fprintf (ptr_asa_out, "\n\n%s index = %ld\n\n", string, index);
#else /* INT_LONG */
  fprintf (ptr_asa_out, "\n\n%s index = %d\n\n", string, index);
#endif /* INT_LONG */
#endif /* INT_ALLOC */
  fflush (ptr_asa_out);
#else /* ASA_PRINT */
  ;
#endif /* ASA_PRINT */
}
#if ASA_PRINT
/***********************************************************************
* print_state
* Prints a description of the current state of the system
* (counters, temperatures, best/last costs, per-parameter values and
* tangents, and optionally the curvature matrix) to ptr_asa_out.
***********************************************************************/
#if HAVE_ANSI
void
print_state (double *parameter_minimum,
             double *parameter_maximum,
             double *tangents,
             double *curvature,
             double *current_cost_temperature,
             double *current_user_parameter_temp,
             double *accepted_to_generated_ratio,
             ALLOC_INT * number_parameters,
             int *curvature_flag,
             LONG_INT * number_accepted,
             LONG_INT * index_cost_acceptances,
             LONG_INT * number_generated,
             LONG_INT * number_invalid_generated_states,
             STATE * last_saved_state,
             STATE * best_generated_state,
             FILE * ptr_asa_out, USER_DEFINES * OPTIONS)
#else
void
print_state (parameter_minimum,
             parameter_maximum,
             tangents,
             curvature,
             current_cost_temperature,
             current_user_parameter_temp,
             accepted_to_generated_ratio,
             number_parameters,
             curvature_flag,
             number_accepted,
             index_cost_acceptances,
             number_generated,
             number_invalid_generated_states,
             last_saved_state, best_generated_state, ptr_asa_out, OPTIONS)
double *parameter_minimum;
double *parameter_maximum;
double *tangents;
double *curvature;
double *current_cost_temperature;
double *current_user_parameter_temp;
double *accepted_to_generated_ratio;
ALLOC_INT *number_parameters;
int *curvature_flag;
LONG_INT *number_accepted;
LONG_INT *index_cost_acceptances;
LONG_INT *number_generated;
LONG_INT *number_invalid_generated_states;
STATE *last_saved_state;
STATE *best_generated_state;
FILE *ptr_asa_out;
USER_DEFINES *OPTIONS;
#endif /* HAVE_ANSI */
{
  ALLOC_INT index_v;
  ALLOC_INT index_vv, index_v_vv;

  fprintf (ptr_asa_out, "\n");
#if TIME_CALC
  /* prepend a timestamp when timing support is compiled in */
  print_time ("", ptr_asa_out);
#endif

  /* Curvature_0 == TRUE suppresses curvature output below; the special
   * value -1 forces it on for this report */
  if (OPTIONS->Curvature_0 == TRUE)
    *curvature_flag = FALSE;
  if (OPTIONS->Curvature_0 == -1)
    *curvature_flag = TRUE;

  /* summary counters; format depends on whether LONG_INT is long or int */
#if INT_LONG
  fprintf (ptr_asa_out,
           "*index_cost_acceptances = %ld, *current_cost_temperature = %*.*g\n",
           *index_cost_acceptances,
           G_FIELD, G_PRECISION, *current_cost_temperature);
  fprintf (ptr_asa_out,
           "*accepted_to_generated_ratio = %*.*g, *number_invalid... = %ld\n",
           G_FIELD, G_PRECISION, *accepted_to_generated_ratio,
           (*number_invalid_generated_states));
  fprintf (ptr_asa_out, "*number_generated = %ld, *number_accepted = %ld\n",
           *number_generated, *number_accepted);
#else
  fprintf (ptr_asa_out,
           "*index_cost_acceptances = %d, *current_cost_temperature = %*.*g\n",
           *index_cost_acceptances,
           G_FIELD, G_PRECISION, *current_cost_temperature);
  fprintf (ptr_asa_out,
           "*accepted_to_generated_ratio = %*.*g, *number_invalid... = %d\n",
           G_FIELD, G_PRECISION, *accepted_to_generated_ratio,
           *number_invalid_generated_states);
  fprintf (ptr_asa_out, "*number_generated = %d, *number_accepted = %d\n",
           *number_generated, *number_accepted);
#endif
  fprintf (ptr_asa_out, "best...->cost = %*.*g, last...->cost = %*.*g\n",
           G_FIELD, G_PRECISION, best_generated_state->cost, G_FIELD,
           G_PRECISION, last_saved_state->cost);

  /* Note that tangents will not be calculated until reanneal
     is called, and therefore their listing in the printout only
     is relevant then */
  fprintf (ptr_asa_out,
           "index_v best...->parameter current_parameter_temp\ttangent\n");
  VFOR (index_v) {
    /* ignore too small ranges */
#if DROPPED_PARAMETERS
    if (PARAMETER_RANGE_TOO_SMALL (index_v))
      continue;
#endif
    /* one row per parameter; the index format tracks ALLOC_INT's type */
    fprintf (ptr_asa_out,
#if INT_ALLOC
             "%d\t%*.*g\t\t%*.*g\t%*.*g\n",
#else
#if INT_LONG
             "%ld\t%*.*g\t\t%*.*g\t%*.*g\n",
#else
             "%d\t%*.*g\t\t%*.*g\t%*.*g\n",
#endif
#endif
             index_v,
             G_FIELD, G_PRECISION, best_generated_state->parameter[index_v],
             G_FIELD, G_PRECISION, current_user_parameter_temp[index_v],
             G_FIELD, G_PRECISION, tangents[index_v]);
  }
  if (*curvature_flag == TRUE) {
    /* print curvatures */
    VFOR (index_v) {
      /* ignore too small ranges */
      if (PARAMETER_RANGE_TOO_SMALL (index_v))
        continue;
      fprintf (ptr_asa_out, "\n");
      VFOR (index_vv) {
        /* only print upper diagonal of matrix */
        if (index_v < index_vv)
          continue;
        /* ignore too small ranges (index_vv) */
        if (PARAMETER_RANGE_TOO_SMALL (index_vv))
          continue;
        /* index_v_vv: row index_v, column index_vv */
        index_v_vv = ROW_COL_INDEX (index_v, index_vv);
        if (index_v == index_vv) {
          /* diagonal entry */
          fprintf (ptr_asa_out,
#if INT_ALLOC
                   "curvature[%d][%d] = %*.*g\n",
#else
#if INT_LONG
                   "curvature[%ld][%ld] = %*.*g\n",
#else
                   "curvature[%d][%d] = %*.*g\n",
#endif
#endif
                   index_v, index_vv,
                   G_FIELD, G_PRECISION, curvature[index_v_vv]);
        } else {
          /* off-diagonal entry; the matrix is symmetric, so the mirrored
           * element is only named, not stored separately */
          fprintf (ptr_asa_out,
#if INT_ALLOC
                   "curvature[%d][%d] = %*.*g \t = curvature[%d][%d]\n",
#else
#if INT_LONG
                   "curvature[%ld][%ld] = %*.*g \t = curvature[%ld][%ld]\n",
#else
                   "curvature[%d][%d] = %*.*g \t = curvature[%d][%d]\n",
#endif
#endif
                   index_v, index_vv,
                   G_FIELD, G_PRECISION, curvature[index_v_vv],
                   index_vv, index_v);
        }
      }
    }
  }
  fprintf (ptr_asa_out, "\n");
  fflush (ptr_asa_out);
}
/***********************************************************************
* print_asa_options
* Prints user's selected options
* First the compile-time configuration macros are echoed, then the
* run-time fields of the USER_DEFINES structure.
***********************************************************************/
#if HAVE_ANSI
void
print_asa_options (FILE * ptr_asa_out, USER_DEFINES * OPTIONS)
#else
void
print_asa_options (ptr_asa_out, OPTIONS)
FILE *ptr_asa_out;
USER_DEFINES *OPTIONS;
#endif /* HAVE_ANSI */
{
  fprintf (ptr_asa_out, "\t\tADAPTIVE SIMULATED ANNEALING\n\n");
  fprintf (ptr_asa_out, "%s\n\n", ASA_ID);

  /* compile-time configuration flags */
  fprintf (ptr_asa_out, "OPTIONS_FILE = %d\n", (int) OPTIONS_FILE);
  fprintf (ptr_asa_out, "OPTIONS_FILE_DATA = %d\n", (int) OPTIONS_FILE_DATA);
  fprintf (ptr_asa_out, "RECUR_OPTIONS_FILE = %d\n",
           (int) RECUR_OPTIONS_FILE);
  fprintf (ptr_asa_out, "RECUR_OPTIONS_FILE_DATA = %d\n",
           (int) RECUR_OPTIONS_FILE_DATA);
  fprintf (ptr_asa_out, "COST_FILE = %d\n", (int) COST_FILE);
  fprintf (ptr_asa_out, "ASA_LIB = %d\n", (int) ASA_LIB);
  fprintf (ptr_asa_out, "HAVE_ANSI = %d\n", (int) HAVE_ANSI);
  fprintf (ptr_asa_out, "IO_PROTOTYPES = %d\n", (int) IO_PROTOTYPES);
  fprintf (ptr_asa_out, "TIME_CALC = %d\n", (int) TIME_CALC);
  fprintf (ptr_asa_out, "TIME_STD = %d\n", (int) TIME_STD);
  fprintf (ptr_asa_out, "TIME_GETRUSAGE = %d\n", (int) TIME_GETRUSAGE);
  fprintf (ptr_asa_out, "INT_LONG = %d\n", (int) INT_LONG);
  fprintf (ptr_asa_out, "INT_ALLOC = %d\n", (int) INT_ALLOC);

  /* numeric limits used throughout the library */
  fprintf (ptr_asa_out, "SMALL_FLOAT = %*.*g\n",
           G_FIELD, G_PRECISION, (double) SMALL_FLOAT);
  fprintf (ptr_asa_out, "MIN_DOUBLE = %*.*g\n",
           G_FIELD, G_PRECISION, (double) MIN_DOUBLE);
  fprintf (ptr_asa_out, "MAX_DOUBLE = %*.*g\n",
           G_FIELD, G_PRECISION, (double) MAX_DOUBLE);
  fprintf (ptr_asa_out, "EPS_DOUBLE = %*.*g\n",
           G_FIELD, G_PRECISION, (double) EPS_DOUBLE);
  fprintf (ptr_asa_out, "CHECK_EXPONENT = %d\n", (int) CHECK_EXPONENT);
  fprintf (ptr_asa_out, "NO_PARAM_TEMP_TEST = %d\n",
           (int) NO_PARAM_TEMP_TEST);
  fprintf (ptr_asa_out, "NO_COST_TEMP_TEST = %d\n", (int) NO_COST_TEMP_TEST);
  fprintf (ptr_asa_out, "SELF_OPTIMIZE = %d\n", (int) SELF_OPTIMIZE);
  fprintf (ptr_asa_out, "ASA_TEST = %d\n", (int) ASA_TEST);
  fprintf (ptr_asa_out, "ASA_TEST_POINT = %d\n", (int) ASA_TEST_POINT);
  fprintf (ptr_asa_out, "ASA_EXIT_ANYTIME = %d\n", (int) ASA_EXIT_ANYTIME);
  fprintf (ptr_asa_out, "ASA_TEMPLATE = %d\n", (int) ASA_TEMPLATE);
  fprintf (ptr_asa_out, "MY_TEMPLATE = %d\n", (int) MY_TEMPLATE);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_LIB = %d\n", (int) ASA_TEMPLATE_LIB);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_ASA_OUT_PID = %d\n",
           (int) ASA_TEMPLATE_ASA_OUT_PID);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_MULTIPLE = %d\n",
           (int) ASA_TEMPLATE_MULTIPLE);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_SELFOPT = %d\n",
           (int) ASA_TEMPLATE_SELFOPT);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_SAMPLE = %d\n",
           (int) ASA_TEMPLATE_SAMPLE);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_QUEUE = %d\n",
           (int) ASA_TEMPLATE_QUEUE);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_PARALLEL = %d\n",
           (int) ASA_TEMPLATE_PARALLEL);
  fprintf (ptr_asa_out, "ASA_TEMPLATE_SAVE = %d\n", (int) ASA_TEMPLATE_SAVE);
  fprintf (ptr_asa_out, "USER_INITIAL_COST_TEMP = %d\n",
           (int) USER_INITIAL_COST_TEMP);
  fprintf (ptr_asa_out, "RATIO_TEMPERATURE_SCALES = %d\n",
           (int) RATIO_TEMPERATURE_SCALES);
  fprintf (ptr_asa_out, "USER_INITIAL_PARAMETERS_TEMPS = %d\n",
           (int) USER_INITIAL_PARAMETERS_TEMPS);
  fprintf (ptr_asa_out, "DELTA_PARAMETERS = %d\n", (int) DELTA_PARAMETERS);
  fprintf (ptr_asa_out, "QUENCH_PARAMETERS = %d\n", (int) QUENCH_PARAMETERS);
  fprintf (ptr_asa_out, "QUENCH_COST = %d\n", (int) QUENCH_COST);
  fprintf (ptr_asa_out, "QUENCH_PARAMETERS_SCALE = %d\n",
           (int) QUENCH_PARAMETERS_SCALE);
  fprintf (ptr_asa_out, "QUENCH_COST_SCALE = %d\n", (int) QUENCH_COST_SCALE);
  fprintf (ptr_asa_out, "OPTIONAL_DATA_DBL = %d\n", (int) OPTIONAL_DATA_DBL);
  fprintf (ptr_asa_out, "OPTIONAL_DATA_INT = %d\n", (int) OPTIONAL_DATA_INT);
  fprintf (ptr_asa_out, "OPTIONAL_DATA_PTR = %d\n", (int) OPTIONAL_DATA_PTR);
  fprintf (ptr_asa_out, "USER_COST_SCHEDULE = %d\n",
           (int) USER_COST_SCHEDULE);
  fprintf (ptr_asa_out, "USER_ACCEPT_ASYMP_EXP = %d\n",
           (int) USER_ACCEPT_ASYMP_EXP);
  fprintf (ptr_asa_out, "USER_ACCEPT_THRESHOLD = %d\n",
           (int) USER_ACCEPT_THRESHOLD);
  fprintf (ptr_asa_out, "USER_ACCEPTANCE_TEST = %d\n",
           (int) USER_ACCEPTANCE_TEST);
  fprintf (ptr_asa_out, "USER_GENERATING_FUNCTION = %d\n",
           (int) USER_GENERATING_FUNCTION);
  fprintf (ptr_asa_out, "USER_REANNEAL_COST = %d\n",
           (int) USER_REANNEAL_COST);
  fprintf (ptr_asa_out, "USER_REANNEAL_PARAMETERS = %d\n",
           (int) USER_REANNEAL_PARAMETERS);
#if INT_LONG
  fprintf (ptr_asa_out, "MAXIMUM_REANNEAL_INDEX = %ld\n",
           (LONG_INT) MAXIMUM_REANNEAL_INDEX);
#else
  fprintf (ptr_asa_out, "MAXIMUM_REANNEAL_INDEX = %d\n",
           (LONG_INT) MAXIMUM_REANNEAL_INDEX);
#endif
  fprintf (ptr_asa_out, "REANNEAL_SCALE = %*.*g\n",
           G_FIELD, G_PRECISION, (double) REANNEAL_SCALE);
  fprintf (ptr_asa_out, "ASA_SAMPLE = %d\n", (int) ASA_SAMPLE);
  fprintf (ptr_asa_out, "ADAPTIVE_OPTIONS = %d\n", (int) ADAPTIVE_OPTIONS);
  fprintf (ptr_asa_out, "ASA_QUEUE = %d\n", (int) ASA_QUEUE);
  fprintf (ptr_asa_out, "ASA_RESOLUTION = %d\n", (int) ASA_RESOLUTION);
  fprintf (ptr_asa_out, "ASA_FUZZY = %d\n", (int) ASA_FUZZY);
  fprintf (ptr_asa_out, "ASA_FUZZY_PRINT = %d\n", (int) ASA_FUZZY_PRINT);
  fprintf (ptr_asa_out, "FITLOC = %d\n", (int) FITLOC);
  fprintf (ptr_asa_out, "FITLOC_ROUND = %d\n", (int) FITLOC_ROUND);
  fprintf (ptr_asa_out, "FITLOC_PRINT = %d\n", (int) FITLOC_PRINT);
  fprintf (ptr_asa_out, "MULTI_MIN = %d\n", (int) MULTI_MIN);
  fprintf (ptr_asa_out, "ASA_PARALLEL = %d\n", (int) ASA_PARALLEL);
  fprintf (ptr_asa_out, "FDLIBM_POW = %d\n", (int) FDLIBM_POW);
  fprintf (ptr_asa_out, "FDLIBM_LOG = %d\n", (int) FDLIBM_LOG);
  fprintf (ptr_asa_out, "FDLIBM_EXP = %d\n\n", (int) FDLIBM_EXP);

  /* output / printing configuration */
  fprintf (ptr_asa_out, "ASA_PRINT = %d\n", (int) ASA_PRINT);
  fprintf (ptr_asa_out, "USER_OUT = %s\n", USER_OUT);
#if USER_ASA_OUT
  fprintf (ptr_asa_out, "ASA_OUT = %s\n", OPTIONS->Asa_Out_File);
#else
  fprintf (ptr_asa_out, "ASA_OUT = %s\n", ASA_OUT);
#endif
  fprintf (ptr_asa_out, "USER_ASA_OUT = %d\n", (int) USER_ASA_OUT);
  fprintf (ptr_asa_out, "USER_ASA_USR_OUT = %d\n", (int) USER_ASA_USR_OUT);
  fprintf (ptr_asa_out, "ASA_PRINT_INTERMED = %d\n",
           (int) ASA_PRINT_INTERMED);
  fprintf (ptr_asa_out, "ASA_PRINT_MORE = %d\n", (int) ASA_PRINT_MORE);
  fprintf (ptr_asa_out, "INCL_STDOUT = %d\n", (int) INCL_STDOUT);
  fprintf (ptr_asa_out, "G_FIELD = %d\n", (int) G_FIELD);
  fprintf (ptr_asa_out, "G_PRECISION = %d\n", (int) G_PRECISION);
  fprintf (ptr_asa_out, "ASA_SAVE = %d\n", (int) ASA_SAVE);
  fprintf (ptr_asa_out, "ASA_SAVE_OPT = %d\n", (int) ASA_SAVE_OPT);
  fprintf (ptr_asa_out, "ASA_SAVE_BACKUP = %d\n", (int) ASA_SAVE_BACKUP);
  fprintf (ptr_asa_out, "ASA_PIPE = %d\n", (int) ASA_PIPE);
  fprintf (ptr_asa_out, "ASA_PIPE_FILE = %d\n", (int) ASA_PIPE_FILE);
  fprintf (ptr_asa_out, "SYSTEM_CALL = %d\n\n", (int) SYSTEM_CALL);

  /* run-time fields from the USER_DEFINES OPTIONS structure */
#if INT_LONG
  fprintf (ptr_asa_out, "OPTIONS->Limit_Acceptances = %ld\n",
           (LONG_INT) OPTIONS->Limit_Acceptances);
  fprintf (ptr_asa_out, "OPTIONS->Limit_Generated = %ld\n",
           (LONG_INT) OPTIONS->Limit_Generated);
#else
  fprintf (ptr_asa_out, "OPTIONS->Limit_Acceptances = %d\n",
           (LONG_INT) OPTIONS->Limit_Acceptances);
  fprintf (ptr_asa_out, "OPTIONS->Limit_Generated = %d\n",
           (LONG_INT) OPTIONS->Limit_Generated);
#endif
  fprintf (ptr_asa_out, "OPTIONS->Limit_Invalid_Generated_States = %d\n",
           OPTIONS->Limit_Invalid_Generated_States);
  fprintf (ptr_asa_out, "OPTIONS->Accepted_To_Generated_Ratio = %*.*g\n\n",
           G_FIELD, G_PRECISION, OPTIONS->Accepted_To_Generated_Ratio);
  fprintf (ptr_asa_out, "OPTIONS->Cost_Precision = %*.*g\n",
           G_FIELD, G_PRECISION, OPTIONS->Cost_Precision);
  fprintf (ptr_asa_out, "OPTIONS->Maximum_Cost_Repeat = %d\n",
           OPTIONS->Maximum_Cost_Repeat);
  fprintf (ptr_asa_out, "OPTIONS->Number_Cost_Samples = %d\n",
           OPTIONS->Number_Cost_Samples);
  fprintf (ptr_asa_out, "OPTIONS->Temperature_Ratio_Scale = %*.*g\n",
           G_FIELD, G_PRECISION, OPTIONS->Temperature_Ratio_Scale);
  fprintf (ptr_asa_out, "OPTIONS->Cost_Parameter_Scale_Ratio = %*.*g\n",
           G_FIELD, G_PRECISION, OPTIONS->Cost_Parameter_Scale_Ratio);
  fprintf (ptr_asa_out, "OPTIONS->Temperature_Anneal_Scale = %*.*g\n",
           G_FIELD, G_PRECISION, OPTIONS->Temperature_Anneal_Scale);
  fprintf (ptr_asa_out, "OPTIONS->Include_Integer_Parameters = %d\n",
           OPTIONS->Include_Integer_Parameters);
  fprintf (ptr_asa_out, "OPTIONS->User_Initial_Parameters = %d\n",
           OPTIONS->User_Initial_Parameters);
#if INT_ALLOC
  fprintf (ptr_asa_out, "OPTIONS->Sequential_Parameters = %d\n",
           (int) OPTIONS->Sequential_Parameters);
#else
#if INT_LONG
  fprintf (ptr_asa_out, "OPTIONS->Sequential_Parameters = %ld\n",
           (LONG_INT) OPTIONS->Sequential_Parameters);
#else
  fprintf (ptr_asa_out, "OPTIONS->Sequential_Parameters = %d\n",
           (LONG_INT) OPTIONS->Sequential_Parameters);
#endif
#endif
  fprintf (ptr_asa_out, "OPTIONS->Initial_Parameter_Temperature = %*.*g\n",
           G_FIELD, G_PRECISION, OPTIONS->Initial_Parameter_Temperature);
  fprintf (ptr_asa_out, "OPTIONS->Acceptance_Frequency_Modulus = %d\n",
           OPTIONS->Acceptance_Frequency_Modulus);
  fprintf (ptr_asa_out, "OPTIONS->Generated_Frequency_Modulus = %d\n",
           OPTIONS->Generated_Frequency_Modulus);
  fprintf (ptr_asa_out, "OPTIONS->Reanneal_Cost = %d\n",
           OPTIONS->Reanneal_Cost);
  fprintf (ptr_asa_out, "OPTIONS->Reanneal_Parameters = %d\n\n",
           OPTIONS->Reanneal_Parameters);
  fprintf (ptr_asa_out, "OPTIONS->Delta_X = %*.*g\n",
           G_FIELD, G_PRECISION, OPTIONS->Delta_X);
  fprintf (ptr_asa_out, "OPTIONS->User_Tangents = %d\n",
           OPTIONS->User_Tangents);
  fprintf (ptr_asa_out, "OPTIONS->Curvature_0 = %d\n", OPTIONS->Curvature_0);
  fprintf (ptr_asa_out, "OPTIONS->Asa_Recursive_Level = %d\n\n",
           OPTIONS->Asa_Recursive_Level);
  fprintf (ptr_asa_out, "\n");
}
#endif /* ASA_PRINT */
#if TIME_CALC
#if TIME_GETRUSAGE
/***********************************************************************
* print_time
* This calculates the time and runtime and prints it.
* Queries this process's resource usage and forwards the user CPU time
* to aux_print_time() for formatting.
***********************************************************************/
#if HAVE_ANSI
void
print_time (char *message, FILE * ptr_asa_out)
#else
void
print_time (message, ptr_asa_out)
char *message;
FILE *ptr_asa_out;
#endif /* HAVE_ANSI */
{
  int who = RUSAGE_SELF;        /* Check our own time */
  struct rusage usage;

  /* get the resource usage information */
#if TIME_STD
  /* some environments need the raw syscall instead of the libc wrapper */
  syscall (SYS_GETRUSAGE, who, &usage);
#else
  getrusage (who, &usage);
#endif

  /* print the usage time in reasonable form; ru_utime is the user CPU time */
  aux_print_time (&usage.ru_utime, message, ptr_asa_out);
}
/***********************************************************************
* aux_print_time
* auxiliary print the time routine
***********************************************************************/
#if HAVE_ANSI
void
aux_print_time (struct timeval *time, char *message, FILE * ptr_asa_out)
#else
void
aux_print_time (time, message, ptr_asa_out)
struct timeval *time;
char *message;
FILE *ptr_asa_out;
#endif /* HAVE_ANSI */
{
  static double last_total;     /* total seconds seen on the previous call */
  double total, delta;
  double hrs, mins, secs;
  double dhrs, dmins, dsecs;

  /* total seconds = whole seconds plus the microsecond fraction;
     delta is the increment since the last call */
  total = (double) ((int) ((double) EPS_DOUBLE + time->tv_sec))
        + (double) ((int) ((double) EPS_DOUBLE + time->tv_usec)) / 1.E6;
  delta = total - last_total;
  last_total = total;

  /* split the cumulative and incremental totals into h / m / s */
  hrs = (int) ((double) EPS_DOUBLE + total / 3600.);
  mins = (int) ((double) EPS_DOUBLE + total / 60.) - 60. * hrs;
  secs = total - (3600. * hrs + 60. * mins);
  dhrs = (int) ((double) EPS_DOUBLE + delta / 3600.);
  dmins = (int) ((double) EPS_DOUBLE + delta / 60.) - 60. * dhrs;
  dsecs = delta - (3600. * dhrs + 60. * dmins);

  /* print the statistics */
  fprintf (ptr_asa_out,
           "%s:time: %gh %gm %gs; incr: %gh %gm %gs\n",
           message, hrs, mins, secs, dhrs, dmins, dsecs);
}
#else /* TIME_GETRUSAGE */
/* Note that on many machines the time resolution of this algorithm
* may be less than the other alternatives, e.g., rounding off the
* number of ticks to the nearest tens of thousands. Also, because
* time here is typically indexed by a long integer, there typically
* is a cycle of time in periods of fractions of an hour. For
* example, under Solaris 2.5.1: The value returned by clock() is
* defined in microseconds, since the first call to clock(), for
* compatibility with systems that have CPU clocks with much higher
* resolution. Because of this, the value returned will wrap around
* after accumulating only 2147 seconds of CPU time (about 36 minutes).
*
* Set TIME_GETRUSAGE to FALSE and TIME_STD to TRUE under
* Cygwin with -mno-cygwin
*
* See asa.h for two places where some additional modifications should
* be made under SunOS 4.1.x. */
#if HAVE_ANSI
void
print_time (char *message, FILE * ptr_asa_out)
#else
void
print_time (message, ptr_asa_out)
char *message;
FILE *ptr_asa_out;
#endif /* HAVE_ANSI */
{
  /* clock() returns processor time in ticks; aux_print_time converts
     it to h/m/s and prints both total and incremental times */
  aux_print_time (clock (), message, ptr_asa_out);
} /*print_time */
/***********************************************************************
* aux_print_time
* auxiliary print the time routine
* Converts a clock() tick count into h/m/s, printing both the running
* total and the increment since the previous call.  The first call only
* records the baseline and prints the tick rate.
***********************************************************************/
#if HAVE_ANSI
void
aux_print_time (clock_t time, char *message, FILE * ptr_asa_out)
#else
void
aux_print_time (time, message, ptr_asa_out)
clock_t time;
char *message;
FILE *ptr_asa_out;
#endif /* HAVE_ANSI */
{
  static clock_t previousTime = -1;     /* -1 marks "no baseline yet" */
  clock_t diffTime;
  double clocksPerSecF = CLOCKS_PER_SEC;
  double timeF, diffF;
  double s, m, h;
  double ds, dm, dh;

  if (previousTime != -1) {
    /* ticks elapsed since the previous call */
    diffTime = time - previousTime;
    timeF = time;
    diffF = diffTime;
    previousTime = time;
    /* convert ticks to seconds, then split into hours/minutes/seconds */
    s = timeF / clocksPerSecF;
    ds = diffF / clocksPerSecF;
    h = (int) ((double) EPS_DOUBLE + s / 3600.);
    m = (int) ((double) EPS_DOUBLE + s / 60.) - 60. * h;
    s -= (3600. * h + 60. * m);
    dh = (int) ((double) EPS_DOUBLE + ds / 3600.);
    dm = (int) ((double) EPS_DOUBLE + ds / 60.) - 60. * dh;
    ds -= (3600. * dh + 60. * dm);
    fprintf (ptr_asa_out,
             "%s:time: %gh %gm %gs; incr: %gh %gm %gs\n",
             message, h, m, s, dh, dm, ds);
  } else {
    /* The first call will be invalid - don't output anything. */
    /* FIX: CLOCKS_PER_SEC expands to a clock_t, whose underlying type is
     * implementation-defined; cast explicitly so it matches %lu instead
     * of risking a format-specifier mismatch (undefined behavior). */
    fprintf (ptr_asa_out, "TIMING PARAMETERS: ticks/sec: %lu\n",
             (unsigned long) CLOCKS_PER_SEC);
    previousTime = time;
    /* Output initial message. */
    print_time (message, ptr_asa_out);
  }
} /* aux_print_time */
#endif /* TIME_GETRUSAGE */
#endif /* TIME_CALC */
#if MULTI_MIN
#if HAVE_ANSI
static int
multi_compare (const void *ii, const void *jj)
#else /* HAVE_ANSI */
static int
multi_compare (ii, jj)
char *ii;
char *jj;
#endif /* HAVE_ANSI */
{
  /* qsort comparator: order saved minima by cost, treating costs within
     EPS_DOUBLE of each other as equal */
  int a = *(int *) ii;
  int b = *(int *) jj;
  double cost_a = multi_cost_qsort[a];
  double cost_b = multi_cost_qsort[b];

  if (cost_a > cost_b + (double) EPS_DOUBLE)
    return (1);
  if (cost_a < cost_b - (double) EPS_DOUBLE)
    return (-1);
  return (0);
}
#endif /* MULTI_MIN */
#if ASA_PARALLEL
#if HAVE_ANSI
static int
sort_parallel (const void *ii, const void *jj)
#else /* HAVE_ANSI */
static int
sort_parallel (ii, jj)
void *ii;
void *jj;
#endif /* HAVE_ANSI */
{
LONG_INT i;
LONG_INT j;
i = *(LONG_INT *) ii;
j = *(LONG_INT *) jj;
if (gener_block_state_qsort[i].cost > gener_block_state_qsort[j].cost)
return (1);
else if (gener_block_state_qsort[i].cost < gener_block_state_qsort[j].cost)
return (-1);
else
return (0);
}
#endif /* ASA_PARALLEL */
#if HAVE_ANSI
void
Exit_ASA (char *statement)
#else /* HAVE_ANSI */
void
Exit_ASA (statement)
char *statement;
#endif /* HAVE_ANSI */
{
  /* report a failed calloc; silent when stdout output is compiled out */
#if INCL_STDOUT
  printf ("\n\n*** EXIT calloc failed in ASA *** %s\n\n", statement);
#endif /* INCL_STDOUT */
}
|
exchange_boundary.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange on vector id
// NOTE exchange_boundary() only exchanges the boundary.
// It will not enforce any boundary conditions
// BC's are either the responsibility of a separate function or should be fused into the stencil
// The argument shape indicates which of faces, edges, and corners on each box must be exchanged
// If the specified shape exceeds the range of defined shapes, the code will default to STENCIL_SHAPE_BOX (i.e. exchange faces, edges, and corners)
//#if CD
//#include "cd.h"
//#endif
// Intra-level ghost-zone (halo) exchange for vector `id` on `level`.
// Phases: (1) prepost MPI Irecvs, (2) pack send buffers, (3) post Isends,
// (4) copy node-local blocks while sends are in flight, (5) wait on all
// messages, (6) unpack receive buffers.  Boundary conditions are NOT
// applied here (see the file header).
void exchange_boundary(level_type * level, int id, int shape){
  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;

  // shape must be < STENCIL_MAX_SHAPES in order to safely index into exchange_ghosts[]
  if(shape>=STENCIL_MAX_SHAPES)shape=STENCIL_SHAPE_BOX;

  // NOTE(review): the tag packs the level tag above a 4-bit shape field,
  // which presumes STENCIL_MAX_SHAPES <= 16 -- confirm against its definition
  int my_tag = (level->tag<<4) | shape;
  int buffer=0;
  int n;

  #ifdef USE_MPI
  // requests[] stores all receive requests first, then all send requests
  int nMessages = level->exchange_ghosts[shape].num_recvs + level->exchange_ghosts[shape].num_sends;
  MPI_Request *recv_requests = level->exchange_ghosts[shape].requests;
  MPI_Request *send_requests = level->exchange_ghosts[shape].requests + level->exchange_ghosts[shape].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level->exchange_ghosts[shape].num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level->exchange_ghosts[shape].num_recvs;n++){
      MPI_Irecv(level->exchange_ghosts[shape].recv_buffers[n],
                level->exchange_ghosts[shape].recv_sizes[n],
                MPI_DOUBLE,
                level->exchange_ghosts[shape].recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level->timers.ghostZone_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers...
  // blocks[0] holds the block copies that fill the MPI send buffers
  if(level->exchange_ghosts[shape].num_blocks[0]){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[0])
    for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[0];buffer++){
      CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[0][buffer]);
    }
    _timeEnd = getTime();
    level->timers.ghostZone_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level->exchange_ghosts[shape].num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level->exchange_ghosts[shape].num_sends;n++){
      MPI_Isend(level->exchange_ghosts[shape].send_buffers[n],
                level->exchange_ghosts[shape].send_sizes[n],
                MPI_DOUBLE,
                level->exchange_ghosts[shape].send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level->timers.ghostZone_send += (_timeEnd-_timeStart);
  }
  #endif

  // exchange locally... try and hide within Isend latency...
  // blocks[1] holds same-process box-to-box ghost copies
  if(level->exchange_ghosts[shape].num_blocks[1]){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[1])
    for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[1];buffer++){
      CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[1][buffer]);
    }
    _timeEnd = getTime();
    level->timers.ghostZone_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level->exchange_ghosts[shape].requests,level->exchange_ghosts[shape].status);
    _timeEnd = getTime();
    level->timers.ghostZone_wait += (_timeEnd-_timeStart);
  }
  // unpack MPI receive buffers
  // blocks[2] holds the copies from receive buffers into ghost regions
  if(level->exchange_ghosts[shape].num_blocks[2]){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[shape].num_blocks[2])
    for(buffer=0;buffer<level->exchange_ghosts[shape].num_blocks[2];buffer++){
      CopyBlock(level,id,&level->exchange_ghosts[shape].blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level->timers.ghostZone_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level->timers.ghostZone_total += (double)(getTime()-_timeCommunicationStart);
}
|
union_find.h | #pragma once
#include <unordered_map>
// Atomically add add_val to *ptr using a compare-and-swap retry loop.
// FIX: the temporaries were int64_t while the CAS operates on an int*,
// so the expected/new values were implicitly narrowed on every call;
// use int locals that match the CAS target's type.
inline void atomic_add(int *ptr, int add_val) {
  int old_val;
  int new_val;
  do {
    old_val = *ptr;               // snapshot the current value
    new_val = old_val + add_val;  // compute the desired value
    // retry if another thread changed *ptr between snapshot and swap
  } while (!__sync_bool_compare_and_swap(ptr, old_val, new_val));
}
// Array-based disjoint-set (union-find) with path compression.
// UnionThreadSafe uses a CAS retry loop so concurrent unions from
// multiple threads converge without locks.
struct UnionFind {
  int *parent;  // parent[i] is i's parent; a root satisfies parent[i] == i

  // Allocate and initialize `size` singleton sets (in parallel).
  explicit UnionFind(int size) {
    parent = new int[size];
#pragma omp parallel for
    for (int i = 0; i < size; i++)parent[i] = i;
  }

  ~UnionFind() {
    delete[] parent;
  }

  // Return the root of x's set, compressing the path along the way.
  // (A negative parent entry is also treated as a root.)
  int FindRoot(int x) {
    return (parent[x] < 0 || parent[x] == x) ? x : parent[x] = FindRoot(parent[x]);
  }

  // Merge the sets containing u and v; after the swap below rx > ry, so
  // the root with the LARGER index is linked under the smaller one.
  // The CAS only succeeds while rx is still its own root; otherwise the
  // roots are recomputed and the link is retried.
  // NOTE(review): FindRoot's path compression writes parent[] without
  // atomics; presumably benign on this platform -- verify for strict
  // data-race freedom.
  void UnionThreadSafe(int u, int v) {
    int rx, ry;
    do {
      rx = FindRoot(u);
      ry = FindRoot(v);
      int index;
      if (rx < ry) {
        index = rx;
        rx = ry;
        ry = index;
      }
      if (rx == ry)break;
    } while (!__sync_bool_compare_and_swap(&(parent[rx]), rx, ry));
  }
};
trsm_x_coo_n_hi_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
/* Sparse triangular solve (TRSM) for a COO matrix: upper triangle,
 * non-unit diagonal, non-transposed, multiple column-major right-hand
 * sides.  Solves A * y = alpha * x column by column via backward
 * substitution; the diagonal is taken from explicit COO entries.
 * x (leading dimension ldx) and y (leading dimension ldy) each hold
 * `columns` dense vectors.
 * NOTE(review): every row of every column rescans all nnz entries, so
 * this is an O(columns * m * nnz) reference implementation, not a
 * tuned kernel. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    /* gather the diagonal of A; rows without an explicit diagonal entry
     * keep a zero, which the alpha_div below would divide by.
     * NOTE(review): diag is a stack VLA of m elements -- large m could
     * overflow the stack; confirm expected problem sizes. */
    ALPHA_Number diag[m];
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT r = 0; r < A->nnz; r++)
    {
        if(A->row_indx[r] == A->col_indx[r])
        {
            diag[A->row_indx[r]] = A->values[r];
        }
    }
    /* each right-hand-side column is independent, so this loop is safe
     * to parallelize */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        /* backward substitution: process rows from m-1 down to 0 */
        for (ALPHA_INT r = m - 1; r >= 0; r--)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            /* temp = sum over strictly-upper entries in row r of
             * A[r,col] * y[col] (y[col] already solved for col > r) */
            for (ALPHA_INT cr = A->nnz - 1; cr >= 0; cr--)
            {
                int row = A->row_indx[cr];
                int col = A->col_indx[cr];
                if(row == r && col > r)
                    alpha_madde(temp, A->values[cr], y[out_y_col * ldy + col]);
            }
            /* y[r] = (alpha * x[r] - temp) / diag[r] */
            ALPHA_Number t;
            alpha_mul(t, alpha, x[out_y_col * ldx + r]);
            alpha_sub(t, t, temp);
            alpha_div(y[out_y_col * ldy + r], t, diag[r]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
hellOMP.c | #include <omp.h>
#include <stdio.h>
int main() {
  /* run the hello-world region on a fixed team of 10 threads */
  const int kNumThreads = 10;
  omp_set_num_threads(kNumThreads);

#pragma omp parallel
  {
    printf("Hello world from thread %d\n", omp_get_thread_num());
  }

  return 0;
}
|
GB_unaryop__ainv_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_uint64
// op(A') function: GB_tran__ainv_int32_uint64
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = -((int32_t) Ax [p]) for p = 0..anz-1 via the
// GB_CAST_OP macro (cast first, then negate), parallelized with OpenMP.
// When GB_DISABLE is set, the kernel is compiled out and the generic
// case is used instead.
GrB_Info GB_unop__ainv_int32_uint64
(
    int32_t *Cx,                // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p])), via the macros defined above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the shared transpose template is textually included; GB_PHASE_2_OF_2
    // selects its second phase, and the template uses the GB_* macros
    // defined at the top of this generated file
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000

typedef struct {
  double r;
  double i;
}complex_t;

// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
  complex_t z = c;
  int iter = 0;

  // iterate z <- z^2 + c until |z|^2 > 4 or the iteration cap is hit
  while (iter < MXITER) {
    double re_next = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = re_next;
    if ((z.r*z.r + z.i*z.i) > 4.0) {
      break;                      // escaped: report the iteration count
    }
    ++iter;
  }
  return iter;                    // MXITER when the point never escapes
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  const double dr = (cmax.r-cmin.r)/(Nre-1);
  const double di = (cmax.i-cmin.i)/(Nim-1);
  int n,m;
  complex_t c;

  // Q2c: rows of the grid are split across OpenMP threads; the column
  // index and the sample point are thread-private
  #pragma omp parallel for private(m, c)
  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}
/* Entry point: render a Nre x Nim Mandelbrot image using Nthreads OpenMP
 * threads and write it to mandelbrot.png.
 * usage: ./mandelbrot Nre Nim Nthreads  (e.g. ./mandelbrot 4096 4096 1)
 * Fixes: validate argc/arguments, check malloc and fopen, free the count
 * buffer, and drop the unreachable `return` that followed `exit(0)`. */
int main(int argc, char **argv){

    if (argc < 4) {
        fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
        return 1;
    }

    int Nre = atoi(argv[1]);
    int Nim = atoi(argv[2]);
    int Nthreads = atoi(argv[3]);

    /* mandelbrot() divides by (N-1), so both dimensions must be >= 2 */
    if (Nre < 2 || Nim < 2 || Nthreads < 1) {
        fprintf(stderr, "Nre and Nim must be >= 2 and Nthreads >= 1\n");
        return 1;
    }

    // Q2b: set the number of OpenMP threads to be Nthreads here:
    omp_set_num_threads(Nthreads);

    // storage for the iteration counts
    float *count = malloc((size_t)Nre * Nim * sizeof(float));
    if (count == NULL) {
        fprintf(stderr, "failed to allocate %dx%d count array\n", Nre, Nim);
        return 1;
    }

    // Parameters for a bounding box for "c" that generates an interesting image
    const float centRe = -.759856, centIm = .125547;
    const float diam = 0.151579;

    complex_t cmin;
    complex_t cmax;
    cmin.r = centRe - 0.5*diam;
    cmax.r = centRe + 0.5*diam;
    cmin.i = centIm - 0.5*diam;
    cmax.i = centIm + 0.5*diam;

    // Q2d: read time before calling mandelbrot with OpenMP API wall clock time
    double start = omp_get_wtime();

    // compute mandelbrot set
    mandelbrot(Nre, Nim, cmin, cmax, count);

    // Q2d: read time after calling mandelbrot using OpenMP wall clock time
    double end = omp_get_wtime();

    // print elapsed time
    printf("elapsed = %g\n", end - start);

    // output mandelbrot to png format image
    FILE *fp = fopen("mandelbrot.png", "w");
    if (fp == NULL) {
        perror("fopen mandelbrot.png");
        free(count);
        return 1;
    }
    /* NOTE(review): whether write_hot_png closes fp is not visible here —
     * the original never called fclose, so that behavior is preserved;
     * confirm against png_util.c before adding an fclose. */
    write_hot_png(fp, Nre, Nim, count, 0, 80);

    free(count);
    return 0;
}
|
Example_target.1.c | /*
* @@name: target.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
extern void init(float*, float*, int);
extern void output(float*, int);

/* Multiply two length-N vectors elementwise on the target device and hand
 * the product to output().  The loop index is declared in the for statement,
 * so it is automatically private to each thread. */
void vec_mult(int N)
{
   float p[N], v1[N], v2[N];

   init(v1, v2, N);

   #pragma omp target
   #pragma omp parallel for
   for (int i = 0; i < N; i++)
      p[i] = v1[i] * v2[i];

   output(p, N);
}
|
threads.c | /*
* Copyright (c) 2003 Matteo Frigo
* Copyright (c) 2003 Massachusetts Institute of Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* threads.c: Portable thread spawning for loops, via the X(spawn_loop)
function. The first portion of this file is a set of macros to
spawn and join threads on various systems. */
#include "threads.h"
/************************* Thread Glue *************************/
/* Adding support for a new shared memory thread API should be easy. You
simply do the following things (look at the POSIX and Solaris
threads code for examples):
* Invent a symbol of the form USING_FOO_THREADS to denote
the use of your thread API, and add an
#elif defined(USING_FOO_THREADS)
before the #else clause below. This is where you will put
your thread definitions. In this #elif, insert the following:
-- #include any header files needed to use the thread API.
-- Typedef fftw_thr_function to be a function pointer
of the type used as a argument to fftw_thr_spawn
(i.e. the entry function for a thread).
-- Define fftw_thr_id, via a typedef, to be the type
that is used for thread identifiers.
-- #define fftw_thr_spawn(tid_ptr, proc, data) to
call whatever function to spawn a new thread. The
new thread should call proc(data) as its starting point,
and tid_ptr is a pointer to a fftw_thr_id that
is set to an identifier for the thread. You can also
define this as a subroutine (put it in threads.c)
if it is too complicated for a macro. The prototype should
be:
void fftw_thr_spawn(fftw_thr_id *tid_ptr,
fftw_thr_function proc,
void *data);
-- #define fftw_thr_wait(tid) to block until the thread
whose identifier is tid has terminated. You can also
define this as a subroutine (put it in threads.c) if
it is too complicated for a macro. The prototype should be:
void fftw_thr_wait(fftw_thr_id tid);
* If semaphores are supported (which allows FFTW to pre-spawn the
threads), then you should #define HAVE_SEMAPHORES and:
-- typedef fftw_sem_id to the type for a semaphore id
-- #define fftw_sem_init(&id) to initialize the semaphore
id to zero (or equivalent)
-- #define fftw_sem_destroy(&id) to destroy the id
-- #define fftw_sem_wait(&id) to the equivalent of
the SYSV sem_wait
-- #define fftw_sem_post(&id) the equivalent of SYSV sem_post
THIS IS CURRENTLY EXPERIMENTAL ONLY.
* If you need to perform any initialization before using threads,
put your initialization code in the X(ithreads_init)() function
in threads.c, bracketed by the appropriate #ifdef of course.
* Also, of course, you should modify config.h to #define
USING_FOO_THREADS, or better yet modify and configure.ac so that
autoconf can automatically detect your threads library.
* Finally, if you do implement support for a new threads API, be
sure to let us know at fftw@fftw.org so that we can distribute
your code to others!
*/
/************************** Solaris Threads ****************************/
#if defined(USING_SOLARIS_THREADS)
/* Solaris threads glue. Tested. */
/* link with -lthread */
#include <thread.h>
/* Thread entry point: */
typedef void * (*fftw_thr_function) (void *);
typedef thread_t fftw_thr_id;
#define fftw_thr_spawn(tid_ptr, proc, data) \
thr_create(0,0,proc,data,THR_BOUND,tid_ptr)
#define fftw_thr_wait(tid) thr_join(tid,0,0)
/************************** BeOS Threads ****************************/
#elif defined(USING_BEOS_THREADS)
/* BeOS threads glue. Tested for DR8.2. */
#include <OS.h>
/* Thread entry point: */
typedef thread_entry fftw_thr_function;
typedef thread_id fftw_thr_id;
#define fftw_thr_spawn(tid_ptr, proc, data) { \
*(tid_ptr) = spawn_thread(proc,"FFTW",B_NORMAL_PRIORITY,data); \
resume_thread(*(tid_ptr)); \
}
/* wait_for_thread requires that we pass a valid pointer as the
second argument, even if we're not interested in the result. */
#define fftw_thr_wait(tid) {long exit_val;wait_for_thread(tid, &exit_val);}
/************************** MacOS Threads ****************************/
#elif defined(USING_MACOS_THREADS)
/* MacOS (old! old!) MP threads glue. Experimental, untested! I do not
have an MP MacOS system available to me...I just read the
documentation. There is actually a good chance that this will work
(since the code below is so short), but I make no guarantees.
Consider it to be a starting point for your own implementation.
I also had to insert some code in threads.c.
MacOS X has real SMP support, thank goodness; I'm leaving this
code here mainly for historical purposes. */
/* Using this code in the MacOS: (See the README file for general
documenation on the FFTW threads code.) To use this code, you have
to do two things. First of all, you have to #define the symbol
USING_MACOS_THREADS. This can be done at the top of this file
or perhaps in your compiler options. Second, you have to weak-link
your project to the MP library.
In your code, you should check at run-time with MPLibraryIsLoaded()
to see if the MP library is available. If it is not, it is
still safe to call the fftw threads routines...in this case,
however, you must always pass 1 for the nthreads parameter!
(Otherwise, you will probably want to pass the value of
MPProcessors() for the nthreads parameter.) */
#include <MP.h>
typedef TaskProc fftw_thr_function;
typedef MPQueueID fftw_thr_id;
#define fftw_thr_spawn(tid_ptr, proc, data) { \
MPTaskID task; \
MPCreateQueue(tid_ptr); \
MPCreateTask(proc,data,kMPUseDefaultStackSize,*(tid_ptr),0,0, \
kMPNormalTaskOptions,&task); \
}
/* Block until the task posted on queue `tid` terminates, then delete the
   queue.  MPWaitOnQueue takes three void** out-parameters for the task's
   result words; we must pass their addresses even though the results are
   discarded.  (Fixed: the '&' before each param had been mangled into the
   mojibake character '¶' by an encoding corruption.) */
#define fftw_thr_wait(tid) { \
     void *param1,*param2,*param3; \
     MPWaitOnQueue(tid,&param1,&param2,&param3,kDurationForever); \
     MPDeleteQueue(tid); \
}
/************************** Win32 Threads ****************************/
#elif defined(__WIN32__) || defined(_WIN32) || defined(_WINDOWS)
/* Win32 threads glue. We have not tested this code! (I just implemented
it by looking at a Win32 threads manual.) Users have reported that this
code works under NT using Microsoft compilers.
This code should be automatically used on Windows, assuming that
one of the above macros is defined by your compiler. You must also
link to the thread-safe version of the C runtime library. */
#include <windows.h>
#include <process.h>
typedef LPTHREAD_START_ROUTINE fftw_thr_function;
typedef HANDLE fftw_thr_id;
/* The following macros are based on a recommendation in the
July 1999 Microsoft Systems Journal (online), to substitute
a call to _beginthreadex for CreateThread. The former is
needed in order to make the C runtime library thread-safe
(in particular, our threads may call malloc/free). */
typedef unsigned (__stdcall *PTHREAD_START) (void *);
#define chBEGINTHREADEX(psa, cbStack, pfnStartAddr, \
pvParam, fdwCreate, pdwThreadID) \
((HANDLE) _beginthreadex( \
(void *) (psa), \
(unsigned) (cbStack), \
(PTHREAD_START) (pfnStartAddr), \
(void *) (pvParam), \
(unsigned) (fdwCreate), \
(unsigned *) (pdwThreadID)))
#define fftw_thr_spawn(tid_ptr, proc, data) { \
DWORD thrid; \
*(tid_ptr) = chBEGINTHREADEX((LPSECURITY_ATTRIBUTES) NULL, 0, \
(fftw_thr_function) proc, (LPVOID) data, \
0, &thrid); \
}
#define fftw_thr_wait(tid) { \
WaitForSingleObject(tid, INFINITE); \
CloseHandle(tid); \
}
/************************** Mach cthreads ****************************/
#elif defined(USING_MACH_THREADS)
#ifdef HAVE_MACH_CTHREADS_H
#include <mach/cthreads.h>
#elif defined(HAVE_CTHREADS_H)
#include <cthreads.h>
#elif defined(HAVE_CTHREAD_H)
#include <cthread.h>
#endif
typedef cthread_fn_t fftw_thr_function;
typedef cthread_t fftw_thr_id;
#define fftw_thr_spawn(tid_ptr, proc, data) \
*(tid_ptr) = cthread_fork(proc, (any_t) (data))
#define fftw_thr_wait(tid) cthread_join(tid)
/************************** MP directive Threads ****************************/
#elif defined(USING_OPENMP_THREADS) || defined(USING_SGIMP_THREADS)
/* Use MP compiler directives to induce parallelism, in which case
we don't need any of the thread spawning/waiting macros: */
typedef void * (*fftw_thr_function) (void *);
typedef char fftw_thr_id; /* dummy */
#define fftw_thr_spawn(tid_ptr, proc, data) ((proc)(data))
#define fftw_thr_wait(tid) (0) /* do nothing */
#define USING_COMPILER_THREADS 1
/************************** POSIX Threads ****************************/
#elif defined(USING_POSIX_THREADS) /* use the default, POSIX threads: */
/* POSIX threads glue. Tested. */
/* link with -lpthread, or better yet use ACX_PTHREAD in autoconf */
#include <pthread.h>
/* Thread entry point: */
typedef void * (*fftw_thr_function) (void *);
static pthread_attr_t fftw_pthread_attributes; /* attrs for POSIX threads */
static pthread_attr_t *fftw_pthread_attributes_p = 0;
typedef pthread_t fftw_thr_id;
#define fftw_thr_spawn(tid_ptr, proc, data) \
CK(!pthread_create(tid_ptr,fftw_pthread_attributes_p,proc,data))
#define fftw_thr_wait(tid) CK(!pthread_join(tid,0))
/* SYSV semaphores are disabled for now because, at least on my Linux
machine, they don't seem to offer much performance advantage. We
should problably use pthread mutices or condition variables
instead, for portability. */
#if 0
#define HAVE_SEMAPHORES 1
#include <semaphore.h>
typedef sem_t fftw_sem_id;
#define fftw_sem_init(pid) CK(!sem_init(pid, 0, 0))
#define fftw_sem_destroy(pid) CK(!sem_destroy(pid))
#define fftw_sem_wait(pid) CK(!sem_wait(pid))
#define fftw_sem_post(pid) CK(!sem_post(pid))
#endif /* 0 */
#elif defined(HAVE_THREADS)
# error HAVE_THREADS is defined without any USING_*_THREADS
#endif
#if 0 /* 1 for experimental pre-spawned threads via Linux spinlocks */
#ifndef HAVE_SEMAPHORES
#define HAVE_SEMAPHORES 1
/* from x86 linux/kernel.h */
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
#include <asm/spinlock.h>
typedef spinlock_t fftw_sem_id;
#define fftw_sem_init(pid) { spin_lock_init(pid); spin_lock(pid); }
#define fftw_sem_destroy(pid) (void) (pid)
#define fftw_sem_wait(pid) { spin_unlock_wait(pid); spin_lock(pid); }
#define fftw_sem_post(pid) spin_unlock(pid)
#endif /* !HAVE_SEMAPHORES */
#endif /* 0 */
/***********************************************************************/
#ifdef HAVE_THREADS
#ifdef HAVE_SEMAPHORES
typedef struct worker_data_s {
fftw_thr_id tid;
fftw_sem_id sid_ready;
fftw_sem_id sid_done;
spawn_function proc;
spawn_data d;
struct worker_data_s *next;
} worker_data;
/* Worker-thread body: loop forever, blocking on sid_ready until the main
   thread posts work, running the stored proc on the stored spawn_data, and
   signalling completion on sid_done.  A NULL proc is the shutdown request
   (posted by kill_workforce). */
static void *do_work(worker_data *w)
WITH_ALIGNED_STACK({
     while (1) {
          fftw_sem_wait(&w->sid_ready);
          if (!w->proc) break;          /* NULL proc == terminate worker */
          w->proc(&w->d);
          fftw_sem_post(&w->sid_done);
     }
     return 0;
})
worker_data *workers = (worker_data *) 0;
/* Make sure at least nworkers pre-spawned worker threads exist on the
   global `workers` list, spawning any that are missing.  Each new worker
   starts in do_work(), blocked on its sid_ready semaphore until
   X(spawn_loop) hands it work. */
static void minimum_workforce(int nworkers)
{
     worker_data *w = workers;

     /* subtract the workers that already exist */
     while (w) {
          --nworkers;
          w = w->next;
     }

     /* spawn the deficit, pushing each new worker onto the list head */
     while (nworkers-- > 0) {
          w = (worker_data *) MALLOC(sizeof(worker_data), OTHER);
          w->next = workers;
          fftw_sem_init(&w->sid_ready);
          fftw_sem_init(&w->sid_done);
          fftw_thr_spawn(&w->tid, (fftw_thr_function) do_work, (void *) w);
          workers = w;
     }
}
/* Terminate and reclaim every pre-spawned worker: post sid_ready with a
   NULL proc (the shutdown signal understood by do_work), join the thread,
   destroy its semaphores, and free its record. */
static void kill_workforce(void)
{
     while (workers) {
          worker_data *w = workers;
          workers = w->next;
          w->proc = (spawn_function) 0;  /* NULL proc tells do_work to exit */
          fftw_sem_post(&w->sid_ready);
          fftw_thr_wait(w->tid);
          fftw_sem_destroy(&w->sid_ready);
          fftw_sem_destroy(&w->sid_done);
          X(ifree)(w);
     }
}
#endif /* HAVE_SEMAPHORES */
/* Distribute a loop from 0 to loopmax-1 over nthreads threads.
proc(d) is called to execute a block of iterations from d->min
to d->max-1. d->thr_num indicate the number of the thread
that is executing proc (from 0 to nthreads-1), and d->data is
the same as the data parameter passed to X(spawn_loop).
This function returns only after all the threads have completed. */
void X(spawn_loop)(int loopmax, int nthr,
                   spawn_function proc, void *data)
{
     int block_size;

     A(loopmax >= 0);
     A(nthr > 0);
     A(proc);

     if (!loopmax) return;

     /* Choose the block size and number of threads in order to (1)
        minimize the critical path and (2) use the fewest threads that
        achieve the same critical path (to minimize overhead).
        e.g. if loopmax is 5 and nthr is 4, we should use only 3
        threads with block sizes of 2, 2, and 1. */
     block_size = (loopmax + nthr - 1) / nthr;
     nthr = (loopmax + block_size - 1) / block_size;

     if (nthr <= 1) {
          /* one thread suffices: run the whole range in the caller */
          spawn_data d;
          d.min = 0; d.max = loopmax;
          d.thr_num = 0;
          d.data = data;
          proc(&d);
     }
     else {
#if defined(USING_COMPILER_THREADS)
          spawn_data d;
#elif defined(HAVE_SEMAPHORES)
          spawn_data d;
          worker_data *w;
#else
          spawn_data *d;
          fftw_thr_id *tid;
#endif
          int i;

          THREAD_ON; /* prevent debugging mode from failing under threads */

#if defined(USING_COMPILER_THREADS)
          /* compiler-directive parallelism: the pragma distributes the
             blocks; fftw_thr_spawn/wait are no-ops in this mode */
#if defined(USING_SGIMP_THREADS)
#pragma parallel local(d,i)
          {
#pragma pfor iterate(i=0; nthr; 1)
#elif defined(USING_OPENMP_THREADS)
#pragma omp parallel for private(d)
#endif
          for (i = 0; i < nthr; ++i) {
               d.max = (d.min = i * block_size) + block_size;
               if (d.max > loopmax)
                    d.max = loopmax;
               d.thr_num = i;
               d.data = data;
               proc(&d);
          }
#if defined(USING_SGIMP_THREADS)
          }
#endif

#elif defined(HAVE_SEMAPHORES)
          /* hand the first nthr-1 blocks to pre-spawned workers... */
          --nthr;
          for (w = workers, i = 0; i < nthr; ++i) {
               A(w);
               w->d.max = (w->d.min = i * block_size) + block_size;
               w->d.thr_num = i;
               w->d.data = data;
               w->proc = proc;
               fftw_sem_post(&w->sid_ready);
               w = w->next;
          }
          /* ...run the final block in the calling thread... */
          d.min = i * block_size;
          d.max = loopmax;
          d.thr_num = i;
          d.data = data;
          proc(&d);
          /* ...then wait until every worker signals completion */
          for (w = workers, i = 0; i < nthr; ++i) {
               A(w);
               fftw_sem_wait(&w->sid_done);
          }

#else /* explicit thread spawning: */
          STACK_MALLOC(spawn_data *, d, sizeof(spawn_data) * nthr);
          STACK_MALLOC(fftw_thr_id *, tid, sizeof(fftw_thr_id) * (--nthr));

          /* spawn nthr-1 threads, one block each */
          for (i = 0; i < nthr; ++i) {
               d[i].max = (d[i].min = i * block_size) + block_size;
               d[i].thr_num = i;
               d[i].data = data;
               fftw_thr_spawn(&tid[i], (fftw_thr_function) proc,
                              (void *) (d + i));
          }
          /* the last block runs in the calling thread */
          d[i].min = i * block_size;
          d[i].max = loopmax;
          d[i].thr_num = i;
          d[i].data = data;
          proc(&d[i]);

          /* join all spawned threads */
          for (i = 0; i < nthr; ++i)
               fftw_thr_wait(tid[i]);

          STACK_FREE(tid);
          STACK_FREE(d);
#endif /* ! USING_COMPILER_THREADS */
          THREAD_OFF; /* prevent debugging mode from failing under threads */
     }
}
#else /* ! HAVE_THREADS */
/* Single-threaded fallback (no thread support compiled in): run the whole
   0..loopmax-1 range in the calling thread with thr_num 0. */
void X(spawn_loop)(int loopmax, int nthr,
                   spawn_function proc, void *data)
{
     spawn_data d;

     UNUSED(nthr);
     d.min = 0; d.max = loopmax;
     d.thr_num = 0;
     d.data = data;
     proc(&d);
}
#endif
#ifdef HAVE_THREADS
/* Planner hooks, installed by X(ithreads_init): whenever the planner
   registers a serial codelet, these also register the corresponding
   multi-threaded solver. */
void kdft_dit_register_hook(planner *p, kdft_dit k, const ct_desc *d)
{
     REGISTER_SOLVER(p, X(mksolver_dft_ct_dit_thr)(k, d));
}

void khc2hc_dit_register_hook(planner *p, khc2hc k, const hc2hc_desc *d)
{
     REGISTER_SOLVER(p, X(mksolver_rdft_hc2hc_dit_thr)(k, d));
}

void khc2hc_dif_register_hook(planner *p, khc2hc k, const hc2hc_desc *d)
{
     REGISTER_SOLVER(p, X(mksolver_rdft_hc2hc_dif_thr)(k, d));
}
#endif /* HAVE_THREADS */
/* X(ithreads_init) does any initialization that is necessary to use
threads. It must be called before calling any fftw threads functions.
Returns 0 if successful, and non-zero if there is an error.
Do not call any fftw threads routines if X(ithreads_init)
is not successful! */
int X(ithreads_init)(void)
{
#ifdef USING_POSIX_THREADS
     /* Set the thread creation attributes as necessary.  If we don't
        change anything, just use the default attributes (NULL). */
     int err, attr, attr_changed = 0;

     err = pthread_attr_init(&fftw_pthread_attributes); /* set to defaults */
     if (err) return err;

     /* Make sure that threads are joinable!  (they aren't on AIX) */
     err = pthread_attr_getdetachstate(&fftw_pthread_attributes, &attr);
     if (err) return err;
     if (attr != PTHREAD_CREATE_JOINABLE) {
          err = pthread_attr_setdetachstate(&fftw_pthread_attributes,
                                            PTHREAD_CREATE_JOINABLE);
          if (err) return err;
          attr_changed = 1;
     }

     /* Make sure threads parallelize (they don't by default on Solaris) */
     err = pthread_attr_getscope(&fftw_pthread_attributes, &attr);
     if (err) return err;
     if (attr != PTHREAD_SCOPE_SYSTEM) {
          err = pthread_attr_setscope(&fftw_pthread_attributes,
                                      PTHREAD_SCOPE_SYSTEM);
          /* IRIX lossage: PTHREAD_SCOPE_SYSTEM requires special
             permissions, giving err == 1, but the default
             (PTHREAD_SCOPE_PROCESS) already parallelizes over
             multiple CPUs(?).  So, we ignore err == 1. */
          if (err == 0)
               attr_changed = 1;
          else if (err != 1)
               return err;
     }

     if (attr_changed) /* we aren't using the defaults */
          fftw_pthread_attributes_p = &fftw_pthread_attributes;
     else {
          fftw_pthread_attributes_p = NULL; /* use default attributes */
          err = pthread_attr_destroy(&fftw_pthread_attributes);
          if (err) return err;
     }
#endif /* USING_POSIX_THREADS */

#ifdef USING_MACOS_THREADS
     /* FIXME: don't have malloc hooks (yet) in fftw3 */
     /* Must use MPAllocate and MPFree instead of malloc and free: */
     if (MPLibraryIsLoaded()) {
          MALLOC_hook = MPAllocate;
          fftw_free_hook = MPFree;
     }
#endif /* USING_MACOS_THREADS */

#if defined(USING_OPENMP_THREADS) && ! defined(_OPENMP)
#error OpenMP enabled but not using an OpenMP compiler
#endif

#ifdef HAVE_THREADS
     /* install the planner hooks that register the threaded solvers */
     X(kdft_dit_register_hook) = kdft_dit_register_hook;
     X(khc2hc_dit_register_hook) = khc2hc_dit_register_hook;
     X(khc2hc_dif_register_hook) = khc2hc_dif_register_hook;
     return 0; /* no error */
#else
     return 0; /* no threads, no error */
#endif
}
/* This function must be called before using nthreads > 1, with
the maximum number of threads that will be used. */
/* Pre-spawn enough workers to serve nthreads_max concurrent threads.
   The calling thread always executes one block itself, hence the -1.
   A no-op when workers are spawned on demand (no semaphore support). */
void X(threads_setmax)(int nthreads_max)
{
#ifdef HAVE_SEMAPHORES
     minimum_workforce(nthreads_max - 1);
#else
     UNUSED(nthreads_max);
#endif
}
/* Undo X(ithreads_init) and X(threads_setmax): release the pthread
   attributes (if we customized them), shut down any pre-spawned workers,
   and uninstall the planner hooks. */
void X(threads_cleanup)(void)
{
#ifdef USING_POSIX_THREADS
     if (fftw_pthread_attributes_p) {
          pthread_attr_destroy(fftw_pthread_attributes_p);
          fftw_pthread_attributes_p = 0;
     }
#endif /* USING_POSIX_THREADS */

#ifdef HAVE_SEMAPHORES
     kill_workforce();
#endif

#ifdef HAVE_THREADS
     /* clear the hooks so serial planning no longer registers threaded solvers */
     X(kdft_dit_register_hook) = 0;
     X(khc2hc_dit_register_hook) = 0;
     X(khc2hc_dif_register_hook) = 0;
#endif
}
|
convolution_1x1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
// Stride-2 1x1 int8 convolution: first shrink the input by copying every
// other element of every other row into a contiguous blob, then run the
// stride-1 sgemm path on the shrunken input.
static void conv1x1s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;

    // after consuming 2*outw input elements on a row, skip the remainder of
    // that row plus the entire next row (stride 2 vertically)
    const int tailstep = w - 2 * outw + w;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    // channels are independent, so shrink them in parallel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const signed char* r0 = bottom_blob.channel(p);
        signed char* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // copy every other input element; unrolled by 4, then 2, then 1
            for (; j + 3 < outw; j += 4)
            {
                outptr[0] = r0[0];
                outptr[1] = r0[2];
                outptr[2] = r0[4];
                outptr[3] = r0[6];

                r0 += 8;
                outptr += 4;
            }
            for (; j + 1 < outw; j += 2)
            {
                outptr[0] = r0[0];
                outptr[1] = r0[2];

                r0 += 4;
                outptr += 2;
            }
            for (; j < outw; j++)
            {
                outptr[0] = r0[0];

                r0 += 2;
                outptr += 1;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
GB_unaryop__abs_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint64
// op(A') function: GB_tran__abs_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS unary operator entrywise to a dense array.  For uint64
// inputs ABS is the identity (see the GB_OP macro above: z = x), so this
// reduces to Cx [p] = Ax [p] for each entry.
// Returns GrB_NO_VALUE when this kernel is compiled out (see GB_DISABLE).
GrB_Info GB_unop__abs_uint64_uint64
(
    uint64_t *restrict Cx,        // output array of anz entries
    const uint64_t *restrict Ax,  // input array of anz entries
    int64_t anz,                  // number of entries to process
    int nthreads                  // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // iterations are independent, so a static schedule splits the work evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p])), expanded from the GB_CAST_OP macro
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (cast (A')): transpose A and apply ABS (the identity for uint64).
// The loop body lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros above.
// Returns GrB_NO_VALUE when this kernel is compiled out (see GB_DISABLE).
GrB_Info GB_tran__abs_uint64_uint64
(
    GrB_Matrix C,                    // output matrix (transposed result)
    const GrB_Matrix A,              // input matrix
    int64_t *restrict *Rowcounts,    // per-slice row counts workspace
    GBI_single_iterator Iter,        // iterator over the slices of A
    const int64_t *restrict A_slice, // slice boundaries of A
    int naslice                      // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
owl_ndarray_conv_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2020 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifndef OWL_CORE_CONV_IMPL
#define OWL_CORE_CONV_IMPL
/*
* Calculate the block sizes for convolution operations.
* Code heavily inspired by Eigen (http://eigen.tuxfamily.org/).
*/
#define IM2COL_THRESHOLD LONG_MAX // TODO: a temp hack to disable the second conv algorithm
#define ALIGN_SIZE 32 // for AVX address alignment
// The effect of calculating block size according to cache sizes is yet to be
// proved here since we use OpenBLAS GEMM directly; also, note that we
// calculate `InputMatrix x KernelMatrix`, not the other way around.
/* Shrink the GEMM panel dimensions *kp (depth), *mp (rows), *np (cols) so
 * the working set fits the cache hierarchy reported by query_cache_sizes.
 * Heuristics follow Eigen's blocking logic (see the file header).  Inputs
 * are read and results written back through the same pointers; very small
 * problems (all dims < 50) are left untouched. */
void compute_block_sizes(int* kp, int* mp, int* np, int typesize) {
    int l1, l2, l3;
    query_cache_sizes(&l1, &l2, &l3);
    // set the cache sizes to small numbers when debugging
    int k = *kp;
    int m = *mp;
    int n = *np;
    // small problems fit the cache already; keep the original sizes
    if (fmaxf(k, fmaxf(m, n)) < 50) {
        return;
    }
    // register-blocking parameters; nr/mr model the micro-kernel footprint
    int nr = 4;
    int num_reg = 16;
    int mr = num_reg / (2 * nr) * typesize;
    int k_strip = 8;
    int k_div = (mr + nr) * typesize;
    int k_sub = mr * nr * typesize;
    // largest depth whose panels fit L1, rounded down to a k_strip multiple
    const int max_kc = fmaxf(((l1 - k_sub) / k_div) & (~(k_strip - 1)), 1);
    const int old_k = k;
    if (k > max_kc) {
        // split k into roughly equal chunks no larger than max_kc
        k = (k % max_kc) == 0 ? max_kc
            : max_kc - k_strip * ((max_kc - 1 - (k % max_kc)) / (k_strip * (k / max_kc + 1)));
        //assert (old_k / k == old_k / max_kc);
    }
    int max_nc;
    const int actual_l2 = 1572864; // l3 for debug; otherwise 1572864
    const int lhs_bytes = m * k * typesize;
    const int rest_l1 = l1 - k_sub - lhs_bytes;
    if (rest_l1 >= nr * k * typesize) {
        // the rhs panel can share L1 with the lhs panel
        max_nc = rest_l1 / (k * typesize);
    } else {
        max_nc = (3 * actual_l2) / (4 * max_kc * typesize);
    }
    // column block: bounded by L2 and rounded down to a multiple of nr
    int nc = (int) (fminf(actual_l2 / (2 * k * typesize), max_nc)) & (~(nr - 1));
    if (n > nc) {
        n = (n % nc == 0) ? nc : (nc - nr * ((nc - (n % nc)) / (nr * (n / nc + 1))));
    } else if (old_k == k) {
        // k and n fit entirely: block the m dimension instead
        int kn_size = k * n * typesize;
        int actual_lm = actual_l2;
        int max_mc = m;
        if (kn_size < 1024) {
            actual_lm = l1;
        } else if (l3 != 0 && kn_size <= 32768) {
            actual_lm = l2;
            max_mc = fminf(576, max_mc);
        }
        int mc = fminf(actual_lm / (3 * k * typesize), max_mc);
        if (mc > mr) {
            mc -= mc % mr;  // round down to a multiple of mr
        }
        else if (mc == 0) {
            // degenerate: cache too small to block m; keep current sizes
            *kp = k; *mp = m; *np = n;
            return;
        }
        m = (m % mc == 0) ? mc : (mc - mr * ((mc - (m % mc)) / (mr * (m / mc + 1))));
    }
    *kp = k; *mp = m; *np = n;
    return;
}
#endif /* OWL_CORE_CONV_IMPL */
#ifdef OWL_ENABLE_TEMPLATE
#ifdef AVX_PSIZE
/*
* Fill in temporary input matrix from input tensor with vectorisation.
* Currently only support AVX instruciton set.
*/
/* Copy one kc_strip-long slice of the im2col input matrix using AVX loads,
 * assuming in_channel % AVX_PSIZE == 0 so every AVX_PSIZE-wide group is
 * contiguous in the input tensor.  When reverse_mode != 0 the data flows
 * the other way: the matrix entries are accumulated back into the input
 * tensor (used for the backward/transpose pass).
 * *cmk_ptr is the running write position in the matrix and is advanced by
 * AVX_PSIZE per group; out-of-bounds (padding) positions are skipped,
 * leaving the pre-zeroed destination untouched. */
void ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
    TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int k,
    int kernel_ri, int input_ri, int in_channel, int idx_base, int cstart,
    int rstart, int input_cols, int input_rows, short reverse_mode
) {
    // assume output_ptr is aligned; if in_channel % AVX_PSIZE == 0, the input
    // matrix can always be loaded consecutively by a step of AVX_PSIZE
    for (int ik = 0; ik < kc_strip; ik += AVX_PSIZE) {
        // decompose the flat index k+ik into (kernel col, kernel row, channel)
        int kc = (k + ik) / kernel_ri;
        int kri = (k + ik) - kc * kernel_ri;
        int kr = kri / in_channel;
        int ki = kri - kr * in_channel;

        int input_col = kc + cstart;
        int input_row = kr + rstart;

        // only touch positions inside the (unpadded) input tensor
        if (input_col < input_cols && input_col >= 0 &&
            input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
                + input_row * in_channel + ki;
            if (reverse_mode == 0) {
                AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
                AVX_STOREA(output_ptr + (*cmk_ptr), v);
            }
            else {
                // reverse: accumulate the matrix slice back into the tensor
                AVX_TYPE v1 = AVX_LOADA(output_ptr + (*cmk_ptr));
                AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
                AVX_TYPE v = AVX_ADD(v1, v2);
                AVX_STOREU(input_ptr + input_index, v);
            }
        }
        *cmk_ptr += AVX_PSIZE;
    }
    return;
}
/* General (unaligned) variant of load_sub_matrix_fast: copy actual_kc
 * entries of the im2col matrix, vectorizing the first kc_strip of them with
 * AVX where an AVX_PSIZE-wide group happens to be contiguous, and falling
 * back to scalar copies elsewhere.  reverse_mode != 0 accumulates matrix
 * entries back into the input tensor instead.  *cmk_ptr tracks the matrix
 * write position; padding positions are skipped (destination pre-zeroed). */
void ACX_FUN_LOAD (load_sub_matrix, spatial) (
    TYPE* input_ptr, TYPE* output_ptr, int* cmk_ptr, int kc_strip, int actual_kc,
    int k, int kernel_ri, int input_ri, int in_channel, int idx_base,
    int cstart, int rstart, int input_cols, int input_rows,
    int kernel_rows, short reverse_mode
){
    int ik = 0;
    // first, load `kc_strip` numbers with a step of AVX_PSIZE;
    // assume `kc_strip % AVX_PSIZE == 0`
    for ( ; ik < kc_strip; ik += AVX_PSIZE) {
        // (channel*row) indices of the first and last element of the group
        const int cr_set[2] = {(k + ik) / in_channel,
                               (k + ik + AVX_PSIZE - 1) / in_channel};
        const int c_set[2] = {cr_set[0] / kernel_rows,
                              cr_set[1] / kernel_rows};
        const int cols[2] = {cstart + c_set[0], cstart + c_set[1]};
        // out of bounds; set the next AVX_PSIZE numbers to 0
        if (cols[0] >= input_cols || cols[1] < 0) {
            *cmk_ptr += AVX_PSIZE;
            continue;
        }
        else if (cols[0] == cols[1]) {
            // whole group lies in one input column
            const int r_set[2] = {cr_set[0] - c_set[0] * kernel_rows,
                                  cr_set[1] - c_set[1] * kernel_rows};
            const int rows[2] = {rstart + r_set[0], rstart + r_set[1]};
            // out of bounds; set the next AVX_PSIZE numbers to 0
            if (rows[0] >= input_rows || rows[1] < 0) {
                *cmk_ptr += AVX_PSIZE;
                continue;
            }
            // next AVX_PSIZE numbers can be loaded consecutively
            else if (rows[0] >= 0 && rows[1] < input_rows) {
                int ki = k + ik - cr_set[0] * in_channel;
                int input_index = idx_base + cols[0] * input_ri
                    + rows[0] * in_channel + ki;
                if (reverse_mode == 0) {
                    AVX_TYPE v = AVX_LOADU(input_ptr + input_index);
                    AVX_STOREU(output_ptr + (*cmk_ptr), v);
                }
                else {
                    AVX_TYPE v1 = AVX_LOADU(output_ptr + (*cmk_ptr));
                    AVX_TYPE v2 = AVX_LOADU(input_ptr + input_index);
                    AVX_TYPE v = AVX_ADD(v1, v2);
                    AVX_STOREU(input_ptr + input_index, v);
                }
                *cmk_ptr += AVX_PSIZE;
                continue;
            }
        }
        // previous special cases do not apply; calculate input index one by one
        for (int ip = 0; ip < AVX_PSIZE; ip++) {
            int kc = (k + ik + ip) / kernel_ri;
            int kri = (k + ik + ip) - kc * kernel_ri;
            int kr = kri / in_channel;
            int ki = kri - kr * in_channel;

            int input_col = kc + cstart;
            int input_row = kr + rstart;

            if (input_col < input_cols && input_col >= 0 &&
                input_row < input_rows && input_row >= 0) {
                int input_index = idx_base + input_col * input_ri
                    + input_row * in_channel + ki;
                if (reverse_mode == 0)
                    output_ptr[*cmk_ptr] = input_ptr[input_index];
                else
                    input_ptr[input_index] += output_ptr[*cmk_ptr];
            }
            *cmk_ptr += 1;
        }
    }
    // second, load the rest `actual_kc - kc_strip` numbers
    for (; ik < actual_kc; ik++) {
        int kc = (k + ik) / kernel_ri;
        int kri = (k + ik) - kc * kernel_ri;
        int kr = kri / in_channel;
        int ki = kri - kr * in_channel;

        int input_col = kc + cstart;
        int input_row = kr + rstart;

        if (input_col < input_cols && input_col >= 0 &&
            input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
                + input_row * in_channel + ki;
            if (reverse_mode == 0)
                output_ptr[*cmk_ptr] = input_ptr[input_index];
            else
                input_ptr[input_index] += output_ptr[*cmk_ptr];
        }
        *cmk_ptr += 1;
    }
    return;
}
#endif /* AVX_PSIZE */
/*
* GEBP-based implementation. See Goto et.al [08] for detail.
*/
/* 2D convolution, forward pass (GEBP-blocked implementation).
 *
 * Dense row-major layout, channels innermost (inferred from the index
 * arithmetic below):
 *   input  [batches][input_cols][input_rows][in_channel]
 *   kernel [kernel_cols][kernel_rows][in_channel][out_channel]
 *   output [batches][output_cols][output_rows][out_channel]
 * TYPE, GEMM, ALPHA, BETA, INIT, IM2COL_THRESHOLD, ALIGN_SIZE,
 * compute_block_sizes and the FUN_* / ACX_FUN_LOAD name builders are
 * supplied by the instantiating file (one expansion per element type).
 *
 * Two paths:
 *   1. small problems: materialise the whole im2col matrix
 *      (output_crb x kernel_cri) and run a single GEMM;
 *   2. otherwise GEBP blocking (see Goto et al. [08]): pack an mc x kc
 *      panel of the implicit im2col matrix into temp_mk, a kc x nc
 *      panel of the kernel into temp_kn, multiply into temp_mn and
 *      accumulate into the output.
 */
CAMLprim value FUN_NATIVE (spatial) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  /* flattened strides / extents */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_cr = kernel_cols * kernel_rows;
  const int kernel_ri = kernel_rows * in_channel;

  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;

  /* Padding offsets.  padding == 1 leaves pr = pc = 0 (presumably VALID
   * mode); otherwise centred padding is derived from the output extent,
   * clamped at 0 -- TODO confirm the encoding against the OCaml caller. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

  /* Path 1: the generated input matrix is small enough -- build it in
   * full and run one GEMM.  The division check guards against integer
   * overflow in kernel_cri * output_crb. */
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    for (int i = 0; i < output_crb; ++i) {
      /* row i of the im2col matrix <-> one (batch, out col, out row) site */
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            /* out-of-range taps keep the calloc'ed zero (zero padding) */
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    /* output[output_crb][out_channel] = inpt2d x kernel */
    GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
      output_crb, out_channel, kernel_cri, ALPHA,
      inpt2d, kernel_cri, kernel_ptr, out_channel,
      BETA, output_ptr, out_channel);
    free(inpt2d);
    return Val_unit;
  }

  /* Path 2: GEBP blocking. */
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  /* Shrink (mc, kc, nc) to cache-friendly block sizes.  The argument
   * order (&mc, &kc, &nc) matches the backward_input / backward_kernel
   * call sites; the previous (&kc, &nc, &mc) order was inconsistent with
   * them and scrambled the block dimensions. */
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));

#ifdef AVX_PSIZE
  /* vectorised packing requires the channel count to be a multiple of
   * the vector width, and an aligned packing buffer */
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);

  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      /* zero the panel: the packers only write in-bounds positions, so
       * padding slots must already contain zero */
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
      int actual_kc = fminf(k + kc, kernel_cri) - k;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      // iterate along each row of the generated input matrix; processing four
      // rows in parallel with the help of e.g. OpenMP should be possible
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        /* decompose the row index into (batch, output col, output row) */
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_base = b * input_cri;
        // fill in the sub input matrix
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          /* decompose the column index into (kernel col, kernel row,
           * input channel) */
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        /* base of the current kc x nc kernel panel; computed afresh every
         * iteration (the old `idx_kn_base += n` accumulation was only
         * correct for the first two n-blocks) */
        int idx_kn_base = k * out_channel + n;
        // fill in the kernel matrix
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        /* temp_mn = temp_mk x temp_kn */
        GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
          actual_mc, actual_nc, actual_kc, ALPHA,
          temp_mk, actual_kc, temp_kn, actual_nc,
          BETA, temp_mn, actual_nc);
        /* accumulate the block product into the output (sums the partial
         * products contributed by each k-block; output was zeroed above) */
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = (ix + m) * out_channel + (iy + n);
            output_ptr[index_mn] += temp_mn[cmn++];
          }
        }
      }
    }
  }

  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: OCaml externals with more than 5 arguments receive them
 * boxed in an argv array when called from bytecode; unpack the 17
 * arguments in order and forward to the native entry point.  argn is
 * unused here. */
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
return FUN_NATIVE (spatial) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
/* 2D convolution, backward pass w.r.t. the input (GEBP-blocked).
 *
 * Scatter-accumulates output_grad x kernel^T back onto the input
 * gradient.  Same layouts and macro parameters as FUN_NATIVE (spatial);
 * `input_ptr` receives the gradient and is zeroed first.  Padding is
 * always derived from the output extent (centred, clamped at 0).
 */
CAMLprim value FUN_NATIVE (spatial_backward_input) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  /* flattened strides / extents (output_ri is currently unused) */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri = kernel_rows * in_channel;

  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  /* the input gradient is accumulated below, so start from zero */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;

  /* Path 1: small problem -- one GEMM into a full im2col-shaped gradient
   * matrix, then scatter-add back onto the input.  The division check
   * guards against integer overflow in kernel_cri * output_crb. */
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    /* inpt2d[output_crb][kernel_cri] = output_grad x kernel^T */
    GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
      output_crb, kernel_cri, out_channel, ALPHA,
      output_ptr, out_channel, kernel_ptr, out_channel,
      BETA, inpt2d, kernel_cri);
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            /* out-of-range taps correspond to padding: nothing to add */
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
            }
            ++cnt;
          }
        }
      }
    }
    free(inpt2d);
    return Val_unit;
  }

  /* Path 2: GEBP blocking (see the forward pass for the scheme). */
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));

#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);

  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = fminf(k + kc, kernel_cri) - k;
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        /* Panel bases, recomputed every iteration.  (The previous
         * `idx_kn_base += n` / `idx_mn_base += n` accumulation drifted as
         * soon as the n loop -- or, for idx_mn_base, the enclosing k
         * loop -- ran more than a couple of blocks.) */
        int idx_kn_base = k * out_channel + n;
        int idx_mn_base = m * out_channel + n;
        /* pack the kc x nc kernel panel */
        int cnk = 0;
        for (int ik = 0; ik < actual_kc; ik++) {
          for (int jn = 0; jn < actual_nc; jn++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            temp_kn[cnk++] = kernel_ptr[index_kn];
          }
        }
        /* pack the mc x nc output-gradient panel */
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn_base + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        /* temp_mk = temp_mn x temp_kn^T */
        GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
          actual_mc, actual_kc, actual_nc, ALPHA,
          temp_mn, actual_nc, temp_kn, actual_nc,
          BETA, temp_mk, actual_kc);
        /* scatter-add the block back onto the input gradient; only
         * in-bounds (non-padding) positions of temp_mk are read */
        int cmk = 0;
        for (int im = 0; im < actual_mc; im += 1) {
          int b = (m + im) / output_cr;
          int cr = (m + im) - b * output_cr;
          int c = cr / output_rows;
          int r = cr - c * output_rows;
          const int cstart = c * col_stride - pc;
          const int rstart = r * row_stride - pr;
          int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
          if (fast_flag) {
            ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
              in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 1);
          }
          else {
            ACX_FUN_LOAD (load_sub_matrix, spatial) (
              input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
              k, kernel_ri, input_ri, in_channel, idx_mk_base,
              cstart, rstart, input_cols, input_rows, kernel_rows, 1);
          }
#else
          for (int ik = 0; ik < actual_kc; ik += 1) {
            int kc = (k + ik) / kernel_ri;
            int kri = (k + ik) - kc * kernel_ri;
            int kr = kri / in_channel;
            int ki = kri - kr * in_channel;
            int input_col = kc + cstart;
            int input_row = kr + rstart;
            if (input_col < input_cols && input_col >= 0 &&
                input_row < input_rows && input_row >= 0) {
              int input_index = idx_mk_base + input_col * input_ri
                + input_row * in_channel + ki;
              input_ptr[input_index] += temp_mk[cmk];
            }
            cmk++;
          }
#endif
        }
      }
    }
  }

  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the
 * native entry point.  argn is unused. */
CAMLprim value FUN_BYTE (spatial_backward_input) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_input) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
/* 2D convolution, backward pass w.r.t. the kernel (GEBP-blocked).
 *
 * Computes kernel_grad = im2col(input)^T x output_grad with the same
 * layouts and macro parameters as FUN_NATIVE (spatial).  `kernel_ptr`
 * receives the gradient and is zeroed first so that the partial
 * products contributed by successive m-blocks can be accumulated into
 * it.  Padding is always derived from the output extent (centred,
 * clamped at 0).
 */
CAMLprim value FUN_NATIVE (spatial_backward_kernel) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  /* flattened strides / extents (output_ri is currently unused) */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_ri = kernel_rows * in_channel;

  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;

  /* kernel_cols * kernel_rio == total kernel element count; gradients
   * are accumulated below, so start from zero */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;

  /* Path 1: small problem -- full im2col then a single GEMM.  The
   * division check guards against overflow in kernel_cri * output_crb. */
  int mat_size = kernel_cri * output_crb;
  if (mat_size / kernel_cri == output_crb && mat_size < IM2COL_THRESHOLD) {
    TYPE *inpt2d = (TYPE *) calloc(mat_size, sizeof(TYPE));
    if (inpt2d == NULL) exit(1);
    TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
    if (kern2d == NULL) exit(1);
    for (int i = 0; i < output_crb; ++i) {
      int bt = i / output_cr;
      int cr = i % output_cr;
      int c = cr / output_rows;
      int r = cr % output_rows;
      const int cstart = c * col_stride - pc;
      const int rstart = r * row_stride - pr;
      const int cend = cstart + kernel_cols;
      const int rend = rstart + kernel_rows;
      const int input_idx_base = bt * input_cri;
      int cnt = 0;
      for (int a = cstart; a < cend; ++a) {
        for (int b = rstart; b < rend; ++b) {
          for (int h = 0; h < in_channel; ++h) {
            /* out-of-range taps keep the calloc'ed zero (zero padding) */
            if (a < input_cols && a >= 0 &&
                b < input_rows && b >= 0) {
              int input_idx =
                input_idx_base + a * input_ri + b * in_channel + h;
              inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
    /* kern2d[out_channel][kernel_cri] = output_grad^T x inpt2d */
    GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
      out_channel, kernel_cri, output_crb, ALPHA,
      output_ptr, out_channel, inpt2d, kernel_cri,
      BETA, kern2d, kernel_cri);
    /* transpose into the kernel's [kernel_cri][out_channel] layout */
    int cnt = 0;
    for (int j = 0; j < kernel_cri; ++j) {
      for (int i = 0; i < out_channel; ++i) {
        kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
      }
    }
    free(inpt2d);
    free(kern2d);
    return Val_unit;
  }

  /* Path 2: GEBP blocking (see the forward pass for the scheme). */
  int mc = output_crb;
  int kc = kernel_cri;
  int nc = out_channel;
  compute_block_sizes(&mc, &kc, &nc, sizeof(TYPE));

#ifdef AVX_PSIZE
  int fast_flag = (in_channel % AVX_PSIZE == 0);
  TYPE *temp_mk = NULL;
  if (posix_memalign((void**) &temp_mk, ALIGN_SIZE, mc * kc * sizeof(TYPE)))
    exit(1);
#else
  TYPE *temp_mk = (TYPE *) calloc(mc * kc, sizeof(TYPE));
  if (temp_mk == NULL) exit(1);
#endif
  TYPE *temp_kn = (TYPE *) calloc(nc * kc, sizeof(TYPE));
  if (temp_kn == NULL) exit(1);
  TYPE *temp_mn = (TYPE *) calloc(mc * nc, sizeof(TYPE));
  if (temp_mn == NULL) exit(1);

  for (int m = 0; m < output_crb; m += mc) {
    int actual_mc = fminf(m + mc, output_crb) - m;
    for (int k = 0; k < kernel_cri; k += kc) {
      int actual_kc = fminf(k + kc, kernel_cri) - k;
      /* zero the panel: the packers only write in-bounds positions */
      memset(temp_mk, 0, mc * kc * sizeof(TYPE));
#ifdef AVX_PSIZE
      int kc_strip = (actual_kc / AVX_PSIZE) * AVX_PSIZE;
#endif
      /* pack the mc x kc panel of the implicit im2col matrix */
      int cmk = 0;
      for (int im = 0; im < actual_mc; im += 1) {
        int b = (m + im) / output_cr;
        int cr = (m + im) - b * output_cr;
        int c = cr / output_rows;
        int r = cr - c * output_rows;
        const int cstart = c * col_stride - pc;
        const int rstart = r * row_stride - pr;
        const int idx_mk_base = b * input_cri;
#ifdef AVX_PSIZE
        if (fast_flag) {
          ACX_FUN_LOAD (load_sub_matrix_fast, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, k, kernel_ri, input_ri,
            in_channel, idx_mk_base, cstart, rstart, input_cols, input_rows, 0);
        }
        else {
          ACX_FUN_LOAD (load_sub_matrix, spatial) (
            input_ptr, temp_mk, &cmk, kc_strip, actual_kc,
            k, kernel_ri, input_ri, in_channel, idx_mk_base,
            cstart, rstart, input_cols, input_rows, kernel_rows, 0);
        }
#else
        for (int ik = 0; ik < actual_kc; ik += 1) {
          int kc = (k + ik) / kernel_ri;
          int kri = (k + ik) - kc * kernel_ri;
          int kr = kri / in_channel;
          int ki = kri - kr * in_channel;
          int input_col = kc + cstart;
          int input_row = kr + rstart;
          if (input_col < input_cols && input_col >= 0 &&
              input_row < input_rows && input_row >= 0) {
            int input_index = idx_mk_base + input_col * input_ri
              + input_row * in_channel + ki;
            temp_mk[cmk] = input_ptr[input_index];
          }
          cmk++;
        }
#endif
      }
      for (int n = 0; n < out_channel; n += nc) {
        int actual_nc = fminf(n + nc, out_channel) - n;
        /* Panel bases, recomputed every iteration.  (The previous `+= n`
         * accumulation drifted once the n loop -- or, for idx_mn_base,
         * the enclosing k loop -- ran more than a couple of blocks.) */
        int idx_mn_base = m * out_channel + n;
        int idx_kn_base = k * out_channel + n;
        /* pack the mc x nc output-gradient panel */
        int cmn = 0;
        for (int ix = 0; ix < actual_mc; ix++) {
          for (int iy = 0; iy < actual_nc; iy++) {
            int index_mn = idx_mn_base + ix * out_channel + iy;
            temp_mn[cmn++] = output_ptr[index_mn];
          }
        }
        memset(temp_kn, 0, nc * kc * sizeof(TYPE));
        /* temp_kn = temp_mn^T x temp_mk : the nc x kc partial gradient
         * contributed by this m-block */
        GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
          actual_nc, actual_kc, actual_mc, ALPHA,
          temp_mn, actual_nc, temp_mk, actual_kc,
          BETA, temp_kn, actual_kc);
        /* Accumulate into the kernel gradient.  `+=` is required: each
         * m-block only sums over its own actual_mc output rows, and
         * kernel_ptr was zeroed above precisely so these partials can be
         * added up (plain `=` kept only the last m-block's contribution). */
        int cnk = 0;
        for (int jn = 0; jn < actual_nc; jn++) {
          for (int ik = 0; ik < actual_kc; ik++) {
            int index_kn = idx_kn_base + ik * out_channel + jn;
            kernel_ptr[index_kn] += temp_kn[cnk++];
          }
        }
      }
    }
  }

  free(temp_mk);
  free(temp_kn);
  free(temp_mn);
  return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the
 * native entry point.  argn is unused. */
CAMLprim value FUN_BYTE (spatial_backward_kernel) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
/*
* im2col implementation
*/
/* 2D convolution forward pass via explicit im2col plus one GEMM: the
 * input patches are flattened into an (output_crb x kernel_cri) matrix
 * which is multiplied by the (kernel_cri x out_channel) kernel matrix.
 * Row construction is parallelised with OpenMP when available. */
CAMLprim value FUN_NATIVE (spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;

  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  int row_in_stride = Long_val(vRow_in_stride);   /* currently unused */
  int col_in_stride = Long_val(vCol_in_stride);   /* currently unused */

  /* flattened strides / extents */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_cr * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;

  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;

  /* padding == 1 leaves pr = pc = 0; otherwise padding is centred,
   * derived from the output extent and clamped at 0 */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }

#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* row i of the im2col matrix <-> one (batch, out col, out row) site */
    const int bt = i / output_cr;
    const int rc = i - bt * output_cr;
    const int ocol = rc / output_rows;
    const int orow = rc - ocol * output_rows;
    const int cstart = ocol * col_stride - pc;
    const int rstart = orow * row_stride - pr;
    const int src_base = bt * input_cri;
    int dst = i * kernel_cri;
    for (int kw = 0; kw < kernel_cols; ++kw) {
      const int icol = cstart + kw;
      for (int kh = 0; kh < kernel_rows; ++kh) {
        const int irow = rstart + kh;
        /* copy one channel vector; out-of-range taps keep calloc's zeros */
        if (icol >= 0 && icol < input_cols && irow >= 0 && irow < input_rows) {
          const int src = src_base + icol * input_ri + irow * in_channel;
          memcpy(inpt2d + dst, input_ptr + src, in_channel * sizeof(TYPE));
        }
        dst += in_channel;
      }
    }
  }

  /* output[output_crb][out_channel] = inpt2d x kernel */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);

  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 17 boxed arguments and forward to the
 * native entry point.  argn is unused. */
CAMLprim value FUN_BYTE (spatial_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
/* 2D convolution, backward pass w.r.t. the kernel, im2col variant:
 * materialise the full im2col matrix of the forward input, compute
 * kernel_grad = output_grad^T x im2col in a single GEMM, then transpose
 * the result back into the kernel's [kernel_cri][out_channel] layout.
 * Padding is always derived from the output extent (centred, clamped at
 * 0) -- presumably matching the forward SAME mode; confirm against the
 * OCaml caller. */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
/* unbox the integer arguments (row_in_stride / col_in_stride are
 * currently unused) */
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
/* flattened strides / extents (output_ri is currently unused) */
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int kernel_rio = out_channel * in_channel * kernel_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
INIT;
/* scratch: the full im2col matrix and an [out_channel][kernel_cri]
 * staging buffer for the gradient before it is transposed back */
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
if (kern2d == NULL) exit(1);
/* kernel_cols * kernel_rio == total kernel element count */
memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
/* build the im2col matrix: row i <-> one (batch, out col, out row)
 * site; out-of-range taps keep the calloc'ed zero (zero padding) */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
/* kern2d[out_channel][kernel_cri] = output_grad^T x inpt2d */
GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
out_channel, kernel_cri, output_crb, ALPHA,
output_ptr, out_channel, inpt2d, kernel_cri,
BETA, kern2d, kernel_cri);
/* transpose into the kernel's layout: out_channel becomes innermost */
int cnt = 0;
for (int j = 0; j < kernel_cri; ++j) {
for (int i = 0; i < out_channel; ++i) {
kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
}
}
free(inpt2d);
free(kern2d);
return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the
 * native entry point.  argn is unused. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_kernel_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
/* 2D convolution, backward pass w.r.t. the input, im2col variant:
 * compute inpt2d = output_grad x kernel^T in one GEMM, then scatter-add
 * each im2col-shaped row back onto the (zeroed) input gradient.
 * Padding is always derived from the output extent (centred, clamped at
 * 0). */
CAMLprim value FUN_NATIVE (spatial_backward_input_im2col) (
value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows, value vOut_channel,
value vRow_stride, value vCol_stride,
value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
/* unbox the integer arguments (row_in_stride / col_in_stride are
 * currently unused) */
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int out_channel = Long_val(vOut_channel);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
/* flattened strides / extents (output_ri is currently unused) */
const int input_cri = in_channel * input_rows * input_cols;
const int input_ri = in_channel * input_rows;
const int output_ri = out_channel * output_rows;
const int output_cr = output_rows * output_cols;
const int output_crb = output_rows * output_cols * batches;
const int kernel_cri = kernel_cols * kernel_rows * in_channel;
TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
/* input gradient is accumulated below, so start from zero */
memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
INIT;
int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
/* inpt2d[output_crb][kernel_cri] = output_grad x kernel^T */
GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
output_crb, kernel_cri, out_channel, ALPHA,
output_ptr, out_channel, kernel_ptr, out_channel,
BETA, inpt2d, kernel_cri);
/* scatter-add: row i <-> one (batch, out col, out row) site; taps that
 * fall outside the input correspond to padding and contribute nothing */
for (int i = 0; i < output_crb; ++i) {
int bt = i / output_cr;
int cr = i % output_cr;
int c = cr / output_rows;
int r = cr % output_rows;
const int cstart = c * col_stride - pc;
const int rstart = r * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int input_idx_base = bt * input_cri;
int cnt = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int h = 0; h < in_channel; ++h) {
if (a < input_cols && a >= 0 &&
b < input_rows && b >= 0) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + h;
input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
}
++cnt;
}
}
}
}
free(inpt2d);
return Val_unit;
}
/* Bytecode stub: unpack the 16 boxed arguments and forward to the
 * native entry point.  argn is unused. */
CAMLprim value FUN_BYTE (spatial_backward_input_im2col) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward_input_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
);
}
/* 3D (cuboid) convolution forward pass via explicit im2col plus one
 * GEMM.  Dense row-major layout with channels innermost (inferred from
 * the index arithmetic below):
 *   input  [batches][input_cols][input_rows][input_dpts][in_channel]
 *   output [batches][output_cols][output_rows][output_dpts][out_channel]
 * and the kernel is consumed as a (kernel_idrc x out_channel) matrix.
 * The im2col rows are built in parallel with OpenMP when available. */
CAMLprim value FUN_NATIVE (cuboid_im2col) (
value vInput, value vKernel, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows,
value vOutput_dpts, value vOut_channel,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *kernel_ptr = (TYPE *) KE->data;
TYPE *output_ptr = (TYPE *) OU->data;
/* unbox the integer arguments */
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int out_channel = Long_val(vOut_channel);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
/* flattened strides / extents */
const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
const int input_rdi = in_channel * input_dpts * input_rows;
const int input_di = in_channel * input_dpts;
const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
const int output_dr = output_dpts * output_rows;
const int output_drc = output_dpts * output_rows * output_cols;
const int output_drcb = output_dpts * output_rows * output_cols * batches;
const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
/* full im2col matrix: output_drcb rows of kernel_idrc columns */
TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
if (inpt2d == NULL) exit(1);
memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
INIT;
/* padding == 1 leaves pd = pr = pc = 0 (presumably VALID mode);
 * otherwise centred padding derived from the output extent, clamped
 * at 0 -- TODO confirm the encoding against the OCaml caller */
int pd = 0, pr = 0, pc = 0;
if (padding != 1) {
pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
if (pc < 0) pc = 0;
if (pr < 0) pr = 0;
if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < output_drcb; ++i) {
/* decompose row i into (batch bt, out col j, out row k, out depth d) */
int bt = i / output_drc;
int jkd = i % output_drc;
int j = jkd / output_dr;
int kd = jkd % output_dr;
int k = kd / output_dpts;
int d = kd % output_dpts;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
const int input_idx_base = bt * input_crdi;
int cnt = 0;
/* copy the receptive cuboid; out-of-range taps keep the calloc'ed
 * zero (zero padding) */
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
for (int h = 0; h < in_channel; ++h) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + h;
inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
}
++cnt;
}
}
}
}
}
/* output[output_drcb][out_channel] = inpt2d x kernel */
GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
output_drcb, out_channel, kernel_idrc, ALPHA,
inpt2d, kernel_idrc, kernel_ptr, out_channel,
BETA, output_ptr, out_channel);
free(inpt2d);
return Val_unit;
}
/* Bytecode stub: unpack the 19 boxed arguments and forward to the
 * native entry point.  argn is unused. */
CAMLprim value FUN_BYTE (cuboid_im2col) (value * argv, int argn) {
return FUN_NATIVE (cuboid_im2col) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17], argv[18]
);
}
/* Gradient of a 3D (cuboid) convolution with respect to the kernel, using
 * the im2col + GEMM strategy: the input is unfolded into a patch matrix
 * (one row per output voxel per batch), a single transposed GEMM against
 * the output gradient produces the kernel gradient, and the result is
 * transposed back into the kernel's storage order.  Overwrites vKernel.
 *
 * vInput/vKernel/vOutput are OCaml bigarrays; the remaining arguments are
 * the tensor dimensions and the three convolution strides (unboxed below). */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides.  The input index below is
   *   bt*input_crdi + col*input_rdi + row*input_di + dpt*in_channel + ch,
   * i.e. the input layout is batch x cols x rows x depths x channels. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  /* inpt2d: output_drcb x kernel_idrc patch matrix (row-major).
   * kern2d: out_channel x kernel_idrc gradient, before transposition.
   * NOTE(review): `exit(1)` on OOM kills the whole OCaml runtime; a raised
   * exception would be friendlier.  Also, the int products passed to calloc
   * can overflow `int` for very large tensors — TODO confirm acceptable. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  /* The kernel gradient is written from scratch, so clear it first. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  /* SAME-style half padding along each spatial axis, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* Unfold (im2col): each i identifies one (batch, col, row, dpt) output
   * position; every iteration writes a disjoint row of inpt2d, so the
   * loop is safely parallel. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    /* Decompose the flat index into batch / col / row / depth. */
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    /* Top-left-front corner of the receptive field (may be negative). */
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            /* Out-of-bounds taps stay zero (buffer was calloc'd). */
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* kern2d = output^T * inpt2d :
   * (out_channel x output_drcb) * (output_drcb x kernel_idrc). */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
       out_channel, kernel_idrc, output_drcb, ALPHA,
       output_ptr, out_channel, inpt2d, kernel_idrc,
       BETA, kern2d, kernel_idrc);
  /* Transpose kern2d (out_channel x kernel_idrc) into the kernel's
   * native order (kernel_idrc-major, out_channel fastest). */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 18 packed bytecode arguments to the
 * native cuboid_backward_kernel_im2col stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Gradient of a 3D (cuboid) convolution with respect to the input, via
 * col2im: one GEMM turns the output gradient back into patch rows, which
 * are then scatter-added into the (zeroed) input gradient.  Overwrites
 * vInput with the gradient.
 *
 * vInput/vKernel/vOutput are OCaml bigarrays; the remaining arguments are
 * tensor dimensions and convolution strides. */
CAMLprim value FUN_NATIVE (cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides: input layout is batch x cols x rows x dpts x chans
   * (see the index expression in the scatter loop below). */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* Patch-gradient matrix: output_drcb x kernel_idrc.
   * NOTE(review): exit(1) on OOM aborts the OCaml runtime; the int product
   * may overflow for very large tensors — TODO confirm acceptable. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* The input gradient is accumulated with +=, so clear it first. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style half padding along each spatial axis, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* inpt2d = output * kernel^T :
   * (output_drcb x out_channel) * (out_channel x kernel_idrc). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
       output_drcb, kernel_idrc, out_channel, ALPHA,
       output_ptr, out_channel, kernel_ptr, out_channel,
       BETA, inpt2d, kernel_idrc);
  /* col2im scatter: mirror of the forward unfold, but accumulating each
   * patch entry back into the input gradient.  Receptive fields overlap,
   * so positions are += targets of several i — presumably why this loop
   * is not OpenMP-parallelized like its im2col counterpart. */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int rend = rstart + kernel_rows;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; ++a) {
      for (int b = rstart; b < rend; ++b) {
        for (int c = dstart; c < dend; ++c) {
          for (int h = 0; h < in_channel; ++h) {
            /* Taps that fell in the padding are simply dropped. */
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 18 packed bytecode arguments to the
 * native cuboid_backward_input_im2col stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_input_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Memory-efficient convolution (MEC) implementation: instead of fully
 * unfolding the input (im2col), only a partial unfolding is kept and one
 * GEMM is issued per output row on shifted views of the shared buffer,
 * which lowers the peak memory footprint of the lowered matrix.
 */
/* 2D convolution forward pass with the memory-efficient (MEC) lowering:
 * the input is unfolded only over the full padded row extent (not per
 * output row), then one column-major GEMM per output row runs on a view
 * of that shared buffer shifted by inpt2d_step, and the result is
 * transposed into the output's native layout.  Overwrites vOutput_ptr. */
CAMLprim value FUN_NATIVE (spatial_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* NOTE(review): row_in_stride / col_in_stride are unboxed but never
   * used below — input dilation appears unimplemented here; confirm. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides and MEC buffer geometry.  padded_input_rows is the
   * row extent the kernel can touch across all output rows; inpt2d_step
   * is how far (in elements) the GEMM view slides per output row. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = input_rows * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = inpt2d_rows * kernel_cols * in_channel * row_stride;
  /* NOTE(review): exit(1) on OOM aborts the whole OCaml runtime. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(batches * output_cri, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  /* padding == 1 means VALID (no padding); otherwise compute SAME-style
   * half padding, clamped at zero. */
  int pr = 0, pc = 0;
  if (padding != 1) {
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  /* Repack the kernel into a kernel_cri x out_channel matrix with
   * out_channel as the slowest axis (see index expression). */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }
  /* Partial unfold: for each (batch, output column) pair, copy the full
   * padded row extent of the receptive column band into inpt2d
   * (column-major: counter indexes the matrix column, i the row). */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          /* Padding taps stay zero (buffer was calloc'd). */
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }
  /* One GEMM per output row; the A operand is the unfolded buffer shifted
   * down by row_stride rows' worth of elements (inpt2d_step). */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
         inpt2d_rows, out_channel, kernel_cri, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_cri,
         BETA, output2d + output_bco * i, inpt2d_rows);
  }
  /* Transpose output2d back into the output's native layout. */
  cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_rows * out_channel; ++i) {
      output_ptr[cnt++] = output2d[i * inpt2d_rows + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 17 packed bytecode arguments to the
 * native spatial_mec stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (spatial_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/* Gradient of a 2D convolution with respect to the kernel, MEC variant:
 * partially unfold the input, repack the output gradient column-major,
 * then accumulate one transposed GEMM per output row into a shared
 * kernel-gradient matrix, and scatter it back into the kernel's native
 * layout.  Overwrites vKernel_ptr. */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* NOTE(review): row_in_stride / col_in_stride are unboxed but unused
   * below — input dilation appears unimplemented here; confirm. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides and MEC buffer geometry (same shapes as the
   * forward spatial_mec pass). */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
  /* NOTE(review): exit(1) on OOM aborts the whole OCaml runtime. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* Kernel gradient is rebuilt from scratch. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  /* SAME-style half padding, clamped at zero (unconditional here). */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* Partial unfold of the input into inpt2d (column-major), identical to
   * the forward spatial_mec unfold. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          /* Padding taps stay zero (buffer was calloc'd). */
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx =
              bt * input_cri + b * input_ri + a * in_channel + h;
            inpt2d[counter * inpt2d_rows + i] = input_ptr[input_idx];
          }
          counter++;
        }
      }
    }
  }
  /* Repack the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }
  /* One GEMM per output row; beta is ALPHA (not BETA), so contributions
   * from every output row accumulate into kern2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_cri, inpt2d_rows, ALPHA,
         output2d + output_bco * i, inpt2d_rows,
         inpt2d + inpt2d_step * i, inpt2d_rows,
         ALPHA, kern2d, out_channel);
  }
  /* Scatter the accumulated gradient back into the kernel's layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int i = 0; i < in_channel; ++i) {
        for (int o = 0; o < out_channel; ++o) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kernel_ptr[kidx] = kern2d[cnt++];
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 16 packed bytecode arguments to the
 * native spatial_backward_kernel_mec stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_kernel_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Gradient of a 2D convolution with respect to the input, MEC variant:
 * repack the output gradient and kernel, accumulate one GEMM per output
 * row into (overlapping) views of the unfolded buffer, then scatter-add
 * that buffer back into the input gradient (col2im).  Overwrites
 * vInput_ptr with the gradient. */
CAMLprim value FUN_NATIVE (spatial_backward_input_mec) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* NOTE(review): row_in_stride / col_in_stride are unboxed but unused
   * below — input dilation appears unimplemented here; confirm. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides and MEC buffer geometry (same as spatial_mec). */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_ro = output_rows * out_channel;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_io = in_channel * out_channel;
  const int kernel_rio = kernel_rows * in_channel * out_channel;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bco = out_channel * output_cols * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * in_channel;
  const int inpt2d_rows = batches * output_cols;
  const int inpt2d_step = batches * output_cols * kernel_cols * in_channel * row_stride;
  /* NOTE(review): exit(1) on OOM aborts the whole OCaml runtime. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_crb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* Input gradient is accumulated with +=, so clear it first. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* SAME-style half padding, clamped at zero. */
  int pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  /* Repack the output gradient into column-major output2d. */
  int cnt = 0;
  for (int j = 0; j < inpt2d_rows; ++j) {
    for (int i = 0; i < output_ro; ++i) {
      output2d[i * inpt2d_rows + j] = output_ptr[cnt++];
    }
  }
  /* Repack the kernel into a kernel_cri x out_channel matrix. */
  cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int i = 0; i < in_channel; ++i) {
          kidx = c * kernel_rio + r * kernel_io + i * out_channel + o;
          kern2d[cnt++] = kernel_ptr[kidx];
        }
      }
    }
  }
  /* One GEMM per output row into views of inpt2d shifted by inpt2d_step.
   * Views of successive rows can overlap (when row_stride < kernel_rows),
   * hence beta = ALPHA so results accumulate rather than overwrite. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
         inpt2d_rows, kernel_cri, out_channel, ALPHA,
         output2d + output_bco * i, inpt2d_rows,
         kern2d, kernel_cri, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  /* col2im scatter: mirror of the forward unfold, accumulating into the
   * input gradient; taps that fell in the padding are dropped. */
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / output_cols;
    int c = i % output_cols;
    const int cstart = c * col_stride - pc;
    const int cend = cstart + kernel_cols;
    const int rstart = 0 - pr;
    const int rend = rstart + padded_input_rows;
    const int input_idx_base = bt * input_cri;
    int counter = 0;
    for (int a = rstart; a < rend; ++a) {
      for (int b = cstart; b < cend; ++b) {
        for (int h = 0; h < in_channel; ++h) {
          if (b < input_cols && b >= 0 &&
              a < input_rows && a >= 0) {
            int input_idx = input_idx_base + b * input_ri + a * in_channel + h;
            input_ptr[input_idx] += inpt2d[counter * inpt2d_rows + i];
          }
          counter++;
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 16 packed bytecode arguments to the
 * native spatial_backward_input_mec stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (spatial_backward_input_mec) (value * argv, int argn) {
  return FUN_NATIVE (spatial_backward_input_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* 3D (cuboid) convolution forward pass with the memory-efficient (MEC)
 * lowering: the input is unfolded only over the padded row extent, one
 * column-major GEMM per output row runs on a shifted view of the shared
 * buffer, and the result is scattered into the output's native layout.
 * Overwrites vOutput. */
CAMLprim value FUN_NATIVE (cuboid_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Flattened strides and MEC buffer geometry.
   * NOTE(review): output_dr / output_drc are computed but unused here. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  INIT;
  /* padding == 1 means VALID (no padding); otherwise SAME-style half
   * padding, clamped at zero. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  /* NOTE(review): exit(1) on OOM aborts the whole OCaml runtime. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  memset(output_ptr, 0, output_drcb * out_channel * sizeof(TYPE));
  /* Repack the kernel into a kernel_idrc x out_channel matrix. */
  int cnt = 0;
  int kidx = 0;
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* Partial unfold over the full padded row extent (shared by all
   * output rows), one matrix row per (batch, out-col, out-dpt) triple. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;  /* shadows the outer cnt deliberately */
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            /* Each (cnt, i) cell is visited once; += on the calloc'd
             * buffer is effectively an assignment here. */
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* One GEMM per output row on the view shifted by inpt2d_step. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasNoTrans,
         inpt2d_rows, out_channel, kernel_idrc, ALPHA,
         inpt2d + inpt2d_step * i, inpt2d_rows, kern2d, kernel_idrc,
         BETA, output2d + output_bcdo * i, inpt2d_rows);
  }
  /* Scatter output2d into the output's native layout. */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output_ptr[oidx] = output2d[cnt++];
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 19 packed bytecode arguments to the
 * native cuboid_mec stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],
    argv[5],  argv[6],  argv[7],  argv[8],  argv[9],
    argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18]
  );
}
/* Gradient of a 3D (cuboid) convolution with respect to the kernel, MEC
 * variant: partially unfold the input, repack the output gradient, then
 * accumulate one transposed GEMM per output row into a shared gradient
 * matrix and scatter it back into the kernel's native layout.
 * Overwrites vKernel. */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  /* Raw data pointers of the three bigarrays. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox the integer arguments. */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Flattened strides and MEC buffer geometry (same as cuboid_mec).
   * NOTE(review): output_dr / output_drc are computed but unused here. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  /* NOTE(review): exit(1) on OOM aborts the whole OCaml runtime. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* Kernel gradient is rebuilt from scratch. */
  memset(kernel_ptr, 0, kernel_idrc * out_channel * sizeof(TYPE));
  INIT;
  /* SAME-style half padding along each spatial axis, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt;
  /* Partial unfold over the shared padded row extent (as in cuboid_mec);
   * each (cnt, i) cell is written once, so += on the calloc'd buffer is
   * effectively an assignment. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            /* Padding taps stay zero. */
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                              r * input_di + d * in_channel + h;
              inpt2d[cnt * inpt2d_rows + i] += input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* Gather the output gradient into column-major output2d (inverse of
   * the scatter at the end of cuboid_mec). */
  cnt = 0;
  int oidx = 0;
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
                   r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  /* One GEMM per output row; beta is ALPHA (not BETA), so contributions
   * from every output row accumulate into kern2d. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasTrans, CblasNoTrans,
         out_channel, kernel_idrc, inpt2d_rows, ALPHA,
         output2d + output_bcdo * i, inpt2d_rows,
         inpt2d + inpt2d_step * i, inpt2d_rows,
         ALPHA, kern2d, out_channel);
  }
  /* Scatter the accumulated gradient back into the kernel's layout. */
  cnt = 0;
  int kidx = 0;
  for (int r = 0; r < kernel_rows; ++r) {
    for (int c = 0; c < kernel_cols; ++c) {
      for (int d = 0; d < kernel_dpts; ++d) {
        for (int i = 0; i < in_channel; ++i) {
          for (int o = 0; o < out_channel; ++o) {
            kidx = c * kernel_rdio + r * kernel_dio +
                   d * kernel_io + i * out_channel + o;
            kernel_ptr[kidx] = kern2d[cnt++];
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode trampoline: forwards the 18 packed bytecode arguments to the
 * native cuboid_backward_kernel_mec stub. argn is unused by convention. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_mec) (value * argv, int argn) {
  return FUN_NATIVE (cuboid_backward_kernel_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Backward pass w.r.t. the input of a 3D (cuboid) convolution using the
 * MEC (memory-efficient convolution) scheme: the output gradient and the
 * kernel are repacked into 2D matrices, multiplied with GEMM one output
 * row at a time into an intermediate "lowered" buffer, and that buffer is
 * scattered (accumulated) back into the zeroed input-gradient array.
 * Memory layouts, as established by the index arithmetic below:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [k_col][k_row][k_dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * Writes the result into vInput and returns Val_unit. */
CAMLprim value FUN_NATIVE (cuboid_backward_input_mec) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  /* Unbox all scalar parameters (OCaml ints). */
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Precomputed strides for the flattened 5-D arrays. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  const int kernel_rdio = kernel_rows * kernel_dpts * in_channel * out_channel;
  const int kernel_dio = kernel_dpts * in_channel * out_channel;
  const int kernel_io = in_channel * out_channel;
  /* Row extent of the (virtually) padded input covered by all output rows. */
  const int padded_input_rows = kernel_rows + (output_rows - 1) * row_stride;
  const int output_bcdo = out_channel * output_cols * output_dpts * batches;
  /* Shape of the MEC lowered matrix and the per-output-row step inside it;
     consecutive row strips overlap when row_stride < kernel_rows. */
  const int inpt2d_cols = padded_input_rows * kernel_cols * kernel_dpts * in_channel;
  const int inpt2d_rows = batches * output_cols * output_dpts;
  const int inpt2d_step = inpt2d_rows * kernel_cols * kernel_dpts * in_channel * row_stride;
  /* NOTE(review): exit(1) on allocation failure terminates the whole OCaml
     runtime instead of raising Out_of_memory; sizes are ints, so very large
     shapes could also overflow before calloc — verify against callers. */
  TYPE *inpt2d = (TYPE *) calloc(inpt2d_cols * inpt2d_rows, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  TYPE *output2d = (TYPE *) calloc(output_drcb * out_channel, sizeof(TYPE));
  if (output2d == NULL) exit(1);
  /* The input gradient is accumulated into, so clear it first. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the output/kernel/stride dimensions;
     clamped at zero so VALID shapes get no padding. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  int cnt = 0;
  int oidx = 0;
  /* Repack the output gradient as a 2D matrix grouped by output row r:
     for each r, a (batches*cols*dpts) x out_channel panel in column-major
     order, so `output2d + output_bcdo * i` below addresses row panel i. */
  for (int r = 0; r < output_rows; ++r) {
    for (int o = 0; o < out_channel; ++o) {
      for (int b = 0; b < batches; ++b) {
        for (int c = 0; c < output_cols; ++c) {
          for (int d = 0; d < output_dpts; ++d) {
            oidx = b * output_crdo + c * output_rdo +
              r * output_dpts * out_channel + d * out_channel + o;
            output2d[cnt++] = output_ptr[oidx];
          }
        }
      }
    }
  }
  cnt = 0;
  int kidx = 0;
  /* Repack the kernel as out_channel x (cols*rows*dpts*in_channel). */
  for (int o = 0; o < out_channel; ++o) {
    for (int r = 0; r < kernel_rows; ++r) {
      for (int c = 0; c < kernel_cols; ++c) {
        for (int d = 0; d < kernel_dpts; ++d) {
          for (int i = 0; i < in_channel; ++i) {
            kidx = c * kernel_rdio + r * kernel_dio +
              d * kernel_io + i * out_channel + o;
            kern2d[cnt++] = kernel_ptr[kidx];
          }
        }
      }
    }
  }
  /* inpt2d[strip i] (+)= output2d[panel i] * kern2d^T.
     NOTE(review): the beta argument is ALPHA (not BETA), i.e. accumulate —
     presumably intentional because consecutive strips (offset inpt2d_step)
     overlap when row_stride < kernel_rows; confirm against the MEC paper. */
  for (int i = 0; i < output_rows; ++i) {
    GEMM(CblasColMajor, CblasNoTrans, CblasTrans,
      inpt2d_rows, kernel_idrc, out_channel, ALPHA,
      output2d + output_bcdo * i, inpt2d_rows,
      kern2d, kernel_idrc, ALPHA,
      inpt2d + inpt2d_step * i, inpt2d_rows);
  }
  /* Scatter each lowered row back into the input gradient, skipping the
     positions that fall in the padding region. */
  const int rstart = 0 - pr;
  const int rend = rstart + padded_input_rows;
  for (int i = 0; i < inpt2d_rows; ++i) {
    /* Decode the lowered-row index i into (batch, out col, out dpt). */
    int bt = i / (output_cols * output_dpts);
    int cd = i % (output_cols * output_dpts);
    int ct = cd / output_dpts;
    int dt = cd % output_dpts;
    const int cstart = ct * col_stride - pc;
    const int dstart = dt * dpt_stride - pd;
    const int cend = cstart + kernel_cols;
    const int dend = dstart + kernel_dpts;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int r = rstart; r < rend; ++r) {
      for (int c = cstart; c < cend; ++c) {
        for (int d = dstart; d < dend; ++d) {
          for (int h = 0; h < in_channel; ++h) {
            if (c >= 0 && c < input_cols &&
                r >= 0 && r < input_rows &&
                d >= 0 && d < input_dpts) {
              int input_idx = input_idx_base + c * input_rdi +
                r * input_di + d * in_channel + h;
              input_ptr[input_idx] += inpt2d[cnt * inpt2d_rows + i];
            }
            ++cnt; /* advance even when padded: keeps cnt in lockstep */
          }
        }
      }
    }
  }
  free(inpt2d);
  free(kern2d);
  free(output2d);
  return Val_unit;
}
/* Bytecode trampoline for the 18-argument native stub: bytecode calls
 * pass arguments boxed in argv, so unpack and delegate. */
CAMLprim value FUN_BYTE (cuboid_backward_input_mec) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (cuboid_backward_input_mec) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Naive (direct loop-nest) implementations of 2D/3D convolution and
 * their backward passes; no im2col lowering or GEMM is used here.
 */
/* Forward 2D convolution, computed directly with nested loops (one
 * multiply-accumulate per kernel tap). Layouts from the indexing below:
 *   input  : [batch][col][row][in_channel]
 *   kernel : [k_col][k_row][in_channel][out_channel]
 *   output : [batch][col][row][out_channel]
 * padding == 1 selects VALID (pr = pc = 0); any other value computes a
 * SAME-style padding from the output/kernel/stride sizes (clamped >= 0).
 * Writes into vOutput_ptr and returns Val_unit. */
CAMLprim value FUN_NATIVE (spatial_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* NOTE(review): row_in_stride/col_in_stride (dilation) are unboxed but
     never used in this naive path — presumably kept for a uniform stub
     signature; same for output_cr, output_crb, kernel_cri and ksize. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int ksize = kernel_cols * kernel_rows;
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  INIT;
  int pr = 0, pc = 0;
  if (padding != 1) {
    /* SAME-style padding derived from the requested output size. */
    pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_cri;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int output_idx_base = i * output_cri + j * output_ri + k * out_channel;
        /* Receptive-field window in input coordinates (may be negative
           or exceed the input when the window overlaps the padding). */
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          TYPE sum = 0.;
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val, kernel_val;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Out-of-bounds taps read as zero (implicit padding). */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    input_idx_base + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                sum += input_val * kernel_val;
              }
            }
          }
          int output_idx = output_idx_base + l;
          *(output_ptr + output_idx) = sum;
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode trampoline for the 17-argument native stub. */
CAMLprim value FUN_BYTE (spatial_naive) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (spatial_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/* Backward pass w.r.t. the kernel of a 2D convolution, naive loop form:
 * kernel_grad[tap] += output_grad[pos] * input[pos's receptive field].
 * Layouts match spatial_naive above; SAME-style padding is always derived
 * from the shapes (no explicit padding flag in this stub).
 * Accumulates into vKernel_ptr (zeroed first) and returns Val_unit. */
CAMLprim value FUN_NATIVE (spatial_backward_kernel_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* NOTE(review): dilation parameters are unused in this naive path. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* Kernel gradient is accumulated across all positions and batches. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the shapes, clamped at zero. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE input_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                /* Padded positions contribute zero to the gradient. */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  input_val = *(input_ptr + input_idx);
                } else {
                  input_val = 0.;
                }
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                *(kernel_ptr + kernel_index) += output_val * input_val;
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode trampoline for the 16-argument native stub. */
CAMLprim value FUN_BYTE (spatial_backward_kernel_naive) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (spatial_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Backward pass w.r.t. the input of a 2D convolution, naive loop form:
 * input_grad[pos] += output_grad[pos'] * kernel[tap] for every output
 * position whose receptive field covers pos. Layouts match spatial_naive;
 * SAME-style padding is derived from the shapes.
 * Accumulates into vInput_ptr (zeroed first) and returns Val_unit. */
CAMLprim value FUN_NATIVE (spatial_backward_input_naive) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* NOTE(review): dilation parameters are unused in this naive path. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  const int kernel_io = out_channel * in_channel;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_ri = out_channel * output_rows;
  /* Input gradient is accumulated into, so clear it first. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the shapes, clamped at zero. */
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  if (pr < 0) pr = 0;
  if (pc < 0) pc = 0;
  for (int i = 0; i < batches; ++i) {
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        const int cstart = j * col_stride - pc;
        const int rstart = k * row_stride - pr;
        const int cend = cstart + kernel_cols;
        const int rend = rstart + kernel_rows;
        for (int l = 0; l < out_channel; ++l) {
          int output_idx =
            i * output_cri + j * output_ri + k * out_channel + l;
          TYPE output_val = *(output_ptr + output_idx);
          for (int h = 0; h < in_channel; ++h) {
            TYPE kernel_val = 0.;
            for (int a = cstart; a < cend; ++a) {
              for (int b = rstart; b < rend; ++b) {
                int kernel_index =
                  (a - cstart) * kernel_rio + (b - rstart) * kernel_io + h * out_channel + l;
                kernel_val = *(kernel_ptr + kernel_index);
                /* Only accumulate into real (non-padded) input cells. */
                if (a >= 0 && a < input_cols &&
                    b >= 0 && b < input_rows) {
                  int input_idx =
                    i * input_cri + a * input_ri + b * in_channel + h;
                  *(input_ptr + input_idx) += output_val * kernel_val;
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode trampoline for the 16-argument native stub. */
CAMLprim value FUN_BYTE (spatial_backward_input_naive) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (spatial_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Forward 3D (cuboid) convolution, computed directly with nested loops.
 * Layouts from the indexing below:
 *   input  : [batch][col][row][dpt][in_channel]
 *   kernel : [k_col][k_row][k_dpt][in_channel][out_channel]
 *   output : [batch][col][row][dpt][out_channel]
 * padding == 1 selects VALID (no padding); any other value computes
 * SAME-style padding from the shapes (clamped >= 0).
 * Writes into vOutput and returns Val_unit. */
CAMLprim value FUN_NATIVE (cuboid_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vPadding
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Strides for the flattened 5-D arrays. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  INIT;
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    /* SAME-style padding derived from the requested output size. */
    pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          /* 3D receptive-field window (may overlap the padding). */
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            TYPE sum = 0.;
            int output_idx = output_idx_base + l;
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    TYPE input_val, kernel_val;
                    /* Out-of-bounds taps read as zero (implicit padding). */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    } else {
                      input_val = 0.;
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    sum += input_val * kernel_val;
                  }
                }
              }
            }
            *(output_ptr + output_idx) = sum;
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode trampoline for the 19-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_naive) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (cuboid_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17],
    argv[18]
  );
}
/* Backward pass w.r.t. the kernel of a 3D (cuboid) convolution, naive
 * loop form: kernel_grad[tap] += output_grad[pos] * input[tap position].
 * Layouts match cuboid_naive above; SAME-style padding is always derived
 * from the shapes (no padding flag in this stub).
 * Accumulates into vKernel (zeroed first) and returns Val_unit. */
CAMLprim value FUN_NATIVE (cuboid_backward_kernel_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Kernel gradient is accumulated across all positions and batches. */
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the shapes, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    /* Padded positions contribute zero to the gradient. */
                    TYPE input_val = 0.;
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      input_val = *(input_ptr + input_idx);
                    }
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    *(kernel_ptr + kernel_index) += output_val * input_val;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode trampoline for the 18-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_kernel_naive) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (cuboid_backward_kernel_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/* Backward pass w.r.t. the input of a 3D (cuboid) convolution, naive
 * loop form: input_grad[pos] += output_grad[pos'] * kernel[tap] for each
 * output position whose receptive field covers pos. Layouts match
 * cuboid_naive; SAME-style padding is derived from the shapes.
 * Accumulates into vInput (zeroed first) and returns Val_unit. */
CAMLprim value FUN_NATIVE (cuboid_backward_input_naive) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int kernel_dio = out_channel * in_channel * kernel_dpts;
  const int kernel_io = out_channel * in_channel;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_rdo = out_channel * output_dpts * output_rows;
  const int output_do = out_channel * output_dpts;
  /* Input gradient is accumulated into, so clear it first. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* SAME-style padding implied by the shapes, clamped at zero. */
  int pc = (col_stride * (output_cols - 1) + kernel_cols - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  for (int i = 0; i < batches; ++i) {
    const int input_idx_base = i * input_crdi;
    for (int j = 0; j < output_cols; ++j) {
      for (int k = 0; k < output_rows; ++k) {
        for (int d = 0; d < output_dpts; ++d) {
          const int output_idx_base =
            i * output_crdo +
            j * output_rdo +
            k * output_do +
            d * out_channel;
          const int cstart = j * col_stride - pc;
          const int rstart = k * row_stride - pr;
          const int dstart = d * dpt_stride - pd;
          const int cend = cstart + kernel_cols;
          const int rend = rstart + kernel_rows;
          const int dend = dstart + kernel_dpts;
          for (int l = 0; l < out_channel; ++l) {
            int output_idx = output_idx_base + l;
            TYPE output_val = *(output_ptr + output_idx);
            for (int h = 0; h < in_channel; ++h) {
              TYPE kernel_val;
              for (int a = cstart; a < cend; ++a) {
                for (int b = rstart; b < rend; ++b) {
                  for (int c = dstart; c < dend; ++c) {
                    int kernel_index =
                      (a - cstart) * kernel_rdio +
                      (b - rstart) * kernel_dio +
                      (c - dstart) * kernel_io +
                      h * out_channel + l;
                    kernel_val = *(kernel_ptr + kernel_index);
                    /* Only accumulate into real (non-padded) input cells. */
                    if (a >= 0 && a < input_cols &&
                        b >= 0 && b < input_rows &&
                        c >= 0 && c < input_dpts) {
                      int input_idx =
                        input_idx_base + a * input_rdi + b * input_di +
                        c * in_channel + h;
                      *(input_ptr + input_idx) += output_val * kernel_val;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  return Val_unit;
}
/* Bytecode trampoline for the 18-argument native stub. */
CAMLprim value FUN_BYTE (cuboid_backward_input_naive) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (cuboid_backward_input_naive) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]
  );
}
/*
 * Dilated (atrous) convolution: kernel taps are spaced apart by the
 * row/col input-stride (dilation) factors.
 */
/* Forward dilated 2D convolution via im2col + GEMM: each output position
 * gathers its (dilated) receptive field into one row of a lowered matrix,
 * then a single GEMM against the kernel matrix produces all outputs.
 * Layouts match spatial_naive: input [b][c][r][in], kernel
 * [kc][kr][in][out], output [b][c][r][out]. padding == 1 selects VALID;
 * otherwise SAME-style padding is computed from the dilated kernel extent.
 * Writes into vOutput_ptr and returns Val_unit. */
CAMLprim value FUN_NATIVE (dilated_spatial_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vPadding, value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int padding = Long_val(vPadding);
  /* Dilation factors: spacing between consecutive kernel taps. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_cri = out_channel * output_rows * output_cols;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  /* Lowered matrix: output_crb rows x kernel_cri columns; calloc zeroes
     it so padded taps are implicitly zero.
     NOTE(review): exit(1) on OOM kills the OCaml runtime without raising. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
  /* Effective (dilated) kernel extent in input coordinates. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int pr = 0, pc = 0;
  if (padding != 1) {
    /* SAME-style padding from the dilated kernel extent, clamped >= 0. */
    pr = (row_stride * ( output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pc = (col_stride * ( output_cols - 1) + kernel_cols_up - input_cols) / 2;
    if (pr < 0) pr = 0;
    if (pc < 0) pc = 0;
  }
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* Decode flat index i into (batch, output col, output row). */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - pc;
    const int rstart = r * row_stride - pr;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    /* Gather the dilated receptive field; taps step by the dilation. */
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt; /* advance even for padded taps (slot stays zero) */
        }
      }
    }
  }
  /* output = inpt2d (output_crb x kernel_cri) * kernel (kernel_cri x out). */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_crb, out_channel, kernel_cri, ALPHA,
    inpt2d, kernel_cri, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode trampoline for the 17-argument native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_im2col) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (dilated_spatial_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15], argv[16]
  );
}
/* Backward pass w.r.t. the kernel of a dilated 2D convolution via
 * im2col + GEMM: rebuild the lowered input matrix, multiply the output
 * gradient (transposed) against it, then transpose the result back into
 * the kernel's [kc][kr][in][out] layout. SAME-style padding is derived
 * from the dilated kernel extent.
 * Writes into vKernel_ptr and returns Val_unit. */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  /* Dilation factors: spacing between consecutive kernel taps. */
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int kernel_rio = out_channel * in_channel * kernel_rows;
  /* NOTE(review): output_ri is computed but unused in this function. */
  const int output_ri = out_channel * output_rows;
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  /* Lowered input and a scratch kernel-gradient matrix; calloc zeroes
     both so padded taps contribute zero.
     NOTE(review): exit(1) on OOM kills the OCaml runtime without raising. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  TYPE *kern2d = (TYPE *) calloc(kernel_cri * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rio * sizeof(TYPE));
  /* Effective (dilated) kernel extent in input coordinates. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* SAME-style padding from the dilated kernel extent, clamped >= 0. */
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
  for (int i = 0; i < output_crb; ++i) {
    /* Decode flat index i into (batch, output col, output row). */
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    /* Gather the dilated receptive field into lowered row i. */
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            inpt2d[i * kernel_cri + cnt] = input_ptr[input_idx];
          }
          ++cnt; /* advance even for padded taps (slot stays zero) */
        }
      }
    }
  }
  /* kern2d = output_grad^T (out x output_crb) * inpt2d (output_crb x cri). */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_cri, output_crb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_cri,
    BETA, kern2d, kernel_cri);
  /* Transpose kern2d (out x cri) into the kernel's (cri x out) layout. */
  int cnt = 0;
  for (int j = 0; j < kernel_cri; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_cri + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode trampoline for the 16-argument native stub. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_kernel_im2col) (value * argv, int argn) {
  (void) argn; /* arity is fixed by the OCaml external declaration */
  return FUN_NATIVE (dilated_spatial_backward_kernel_im2col) (
    argv[0],  argv[1],  argv[2],  argv[3],  argv[4],  argv[5],
    argv[6],  argv[7],  argv[8],  argv[9],  argv[10], argv[11],
    argv[12], argv[13], argv[14], argv[15]
  );
}
/* Backward pass w.r.t. the INPUT of a 2-D dilated convolution, using the
 * GEMM + col2im strategy: the output gradient (output_ptr) is multiplied
 * by the kernel to build a column buffer (inpt2d), whose entries are then
 * accumulated back into input_ptr at their source positions.
 *
 * All dimension/stride arguments arrive as OCaml immediate ints; the
 * "*_in_stride" values are the dilation factors.  TYPE, GEMM, ALPHA, BETA
 * and INIT are template macros instantiated per element type.
 *
 * NOTE(review): on calloc failure this exits the whole process; raising an
 * OCaml out-of-memory exception would be friendlier -- confirm against the
 * conventions used elsewhere in this template before changing. */
CAMLprim value FUN_NATIVE (dilated_spatial_backward_input_im2col) (
  value vInput_ptr, value vKernel_ptr, value vOutput_ptr,
  value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
  value vKernel_cols, value vKernel_rows,
  value vOutput_cols, value vOutput_rows, value vOut_channel,
  value vRow_stride, value vCol_stride,
  value vRow_in_stride, value vCol_in_stride
) {
  /* Unbox the three bigarrays; their data buffers are used in place. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel_ptr);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int out_channel = Long_val(vOut_channel);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the NWHC-style layouts used below. */
  const int input_cri = in_channel * input_rows * input_cols;
  const int input_ri = in_channel * input_rows;
  const int output_ri = out_channel * output_rows;  /* NOTE(review): unused here */
  const int output_cr = output_rows * output_cols;
  const int output_crb = output_rows * output_cols * batches;
  const int kernel_cri = kernel_cols * kernel_rows * in_channel;
  INIT;
  /* Column buffer: one row of length kernel_cri per output position. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_cri * output_crb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* The input gradient is accumulated with +=, so it must start at zero. */
  memset(input_ptr, 0, batches * input_cri * sizeof(TYPE));
  /* "Up-sampled" kernel extents after applying the dilation factors. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  /* SAME-style padding, split evenly (extra pixel goes to bottom/right). */
  int pad_rows = row_stride * (output_rows - 1) + kernel_rows_up - input_rows;
  int pad_cols = col_stride * (output_cols - 1) + kernel_cols_up - input_cols;
  int p_top = pad_rows / 2;
  int p_left = pad_cols / 2;
  if (p_top < 0) p_top = 0;
  if (p_left < 0) p_left = 0;
  /* inpt2d[output_crb x kernel_cri] = output_grad x kernel^T */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_crb, kernel_cri, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_cri);
  /* col2im scatter.  Deliberately serial: receptive fields of different
   * output positions overlap, so parallel += on input_ptr would race. */
  for (int i = 0; i < output_crb; ++i) {
    int bt = i / output_cr;
    int cr = i % output_cr;
    int c = cr / output_rows;
    int r = cr % output_rows;
    const int cstart = c * col_stride - p_left;
    const int rstart = r * row_stride - p_top;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int input_idx_base = bt * input_cri;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int h = 0; h < in_channel; ++h) {
          /* Skip positions that fall in the zero padding. */
          if (a < input_cols && a >= 0 &&
              b < input_rows && b >= 0) {
            int input_idx =
              input_idx_base + a * input_ri + b * in_channel + h;
            input_ptr[input_idx] += inpt2d[i * kernel_cri + cnt];
          }
          ++cnt;
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 16-element argv from the bytecode interpreter
 * and forward to the native implementation. */
CAMLprim value FUN_BYTE (dilated_spatial_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_spatial_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]
  );
}
/* Forward 3-D (cuboid) dilated convolution via im2col + GEMM.  Each output
 * position is expanded into a row of its (dilated) receptive field in
 * inpt2d; a single GEMM against the kernel then produces all outputs.
 *
 * vPadding selects VALID (padding == 1) versus SAME-style padding
 * (anything else); the "*_in_stride" arguments are the dilation factors.
 * TYPE, GEMM, ALPHA, BETA and INIT are template macros instantiated per
 * element type.
 *
 * NOTE(review): exit(1) on calloc failure kills the whole runtime; see the
 * sibling stubs before changing this behavior. */
CAMLprim value FUN_NATIVE (dilated_cuboid_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride,
  value vPadding
) {
  /* Unbox bigarrays; buffers are used in place. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  int padding = Long_val(vPadding);
  /* Flattened strides for the 5-D layouts. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_crdo = out_channel * output_dpts * output_rows * output_cols;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  /* calloc so padded (out-of-bounds) entries stay zero in the im2col rows. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  memset(output_ptr, 0, batches * output_crdo * sizeof(TYPE));
  INIT;
  /* Dilated ("up-sampled") kernel extents. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  /* padding == 1 means VALID (no padding); otherwise SAME-style halves. */
  int pd = 0, pr = 0, pc = 0;
  if (padding != 1) {
    pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
    pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
    pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
    if (pc < 0) pc = 0;
    if (pr < 0) pr = 0;
    if (pd < 0) pd = 0;
  }
  /* im2col gather: each iteration fills one independent row of inpt2d,
   * so the loop parallelizes without conflicts. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* output[output_drcb x out_channel] = inpt2d x kernel */
  GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans,
    output_drcb, out_channel, kernel_idrc, ALPHA,
    inpt2d, kernel_idrc, kernel_ptr, out_channel,
    BETA, output_ptr, out_channel);
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 22-element argv from the bytecode interpreter
 * and forward to the native implementation. */
CAMLprim value FUN_BYTE (dilated_cuboid_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20], argv[21]
  );
}
/* Backward pass w.r.t. the KERNEL of a 3-D dilated convolution.  Rebuilds
 * the im2col buffer from the input, multiplies the output gradient against
 * it (GEMM with the output transposed), and finally transposes the result
 * into the kernel's storage order.
 *
 * Always applies SAME-style padding arithmetic (cf. the forward stub,
 * which switches on vPadding).  TYPE, GEMM, ALPHA, BETA and INIT are
 * template macros instantiated per element type.
 *
 * NOTE(review): exit(1) on calloc failure kills the whole runtime; see the
 * sibling stubs before changing this behavior. */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  /* Unbox bigarrays; buffers are used in place. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the 5-D layouts. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int kernel_rdio = out_channel * in_channel * kernel_dpts * kernel_rows;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  INIT;
  /* calloc so padded entries stay zero in the im2col rows. */
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* Scratch holding the kernel gradient in [out_channel x kernel_idrc]
   * order before the final transpose into kernel_ptr. */
  TYPE *kern2d = (TYPE *) calloc(kernel_idrc * out_channel, sizeof(TYPE));
  if (kern2d == NULL) exit(1);
  memset(kernel_ptr, 0, kernel_cols * kernel_rdio * sizeof(TYPE));
  /* Dilated ("up-sampled") kernel extents and SAME-style padding. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* im2col gather; rows of inpt2d are independent, so this parallelizes. */
  #ifdef _OPENMP
  #pragma omp parallel for schedule(static)
  #endif /* _OPENMP */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              inpt2d[i * kernel_idrc + cnt] = input_ptr[input_idx];
            }
            ++cnt;
          }
        }
      }
    }
  }
  /* kern2d[out_channel x kernel_idrc] = output_grad^T x inpt2d */
  GEMM(CblasRowMajor, CblasTrans, CblasNoTrans,
    out_channel, kernel_idrc, output_drcb, ALPHA,
    output_ptr, out_channel, inpt2d, kernel_idrc,
    BETA, kern2d, kernel_idrc);
  /* Transpose kern2d into the kernel's [kernel_idrc x out_channel] order. */
  int cnt = 0;
  for (int j = 0; j < kernel_idrc; ++j) {
    for (int i = 0; i < out_channel; ++i) {
      kernel_ptr[cnt++] = kern2d[i * kernel_idrc + j];
    }
  }
  free(inpt2d);
  free(kern2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 21-element argv from the bytecode interpreter
 * and forward to the native implementation. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_kernel_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_kernel_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
/* Backward pass w.r.t. the INPUT of a 3-D dilated convolution: GEMM of the
 * output gradient against the kernel (transposed) builds a column buffer,
 * which is then scattered (col2im) back into input_ptr with accumulation.
 *
 * Always applies SAME-style padding arithmetic.  TYPE, GEMM, ALPHA, BETA
 * and INIT are template macros instantiated per element type.
 *
 * NOTE(review): exit(1) on calloc failure kills the whole runtime; see the
 * sibling stubs before changing this behavior. */
CAMLprim value FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
  value vInput, value vKernel, value vOutput,
  value vBatches, value vInput_cols, value vInput_rows,
  value vInput_dpts, value vIn_channel,
  value vKernel_cols, value vKernel_rows, value vKernel_dpts,
  value vOutput_cols, value vOutput_rows,
  value vOutput_dpts, value vOut_channel,
  value vDpt_stride, value vRow_stride, value vCol_stride,
  value vDpt_in_stride, value vRow_in_stride, value vCol_in_stride
) {
  /* Unbox bigarrays; buffers are used in place. */
  struct caml_ba_array *IN = Caml_ba_array_val(vInput);
  struct caml_ba_array *KE = Caml_ba_array_val(vKernel);
  struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
  TYPE *input_ptr = (TYPE *) IN->data;
  TYPE *kernel_ptr = (TYPE *) KE->data;
  TYPE *output_ptr = (TYPE *) OU->data;
  int batches = Long_val(vBatches);
  int input_cols = Long_val(vInput_cols);
  int input_rows = Long_val(vInput_rows);
  int input_dpts = Long_val(vInput_dpts);
  int in_channel = Long_val(vIn_channel);
  int kernel_cols = Long_val(vKernel_cols);
  int kernel_rows = Long_val(vKernel_rows);
  int kernel_dpts = Long_val(vKernel_dpts);
  int output_cols = Long_val(vOutput_cols);
  int output_rows = Long_val(vOutput_rows);
  int output_dpts = Long_val(vOutput_dpts);
  int out_channel = Long_val(vOut_channel);
  int dpt_stride = Long_val(vDpt_stride);
  int row_stride = Long_val(vRow_stride);
  int col_stride = Long_val(vCol_stride);
  int dpt_in_stride = Long_val(vDpt_in_stride);
  int row_in_stride = Long_val(vRow_in_stride);
  int col_in_stride = Long_val(vCol_in_stride);
  /* Flattened strides for the 5-D layouts. */
  const int input_crdi = in_channel * input_dpts * input_rows * input_cols;
  const int input_rdi = in_channel * input_dpts * input_rows;
  const int input_di = in_channel * input_dpts;
  const int output_dr = output_dpts * output_rows;
  const int output_drc = output_dpts * output_rows * output_cols;
  const int output_drcb = output_dpts * output_rows * output_cols * batches;
  const int kernel_idrc = in_channel * kernel_dpts * kernel_rows * kernel_cols;
  TYPE *inpt2d = (TYPE *) calloc(kernel_idrc * output_drcb, sizeof(TYPE));
  if (inpt2d == NULL) exit(1);
  /* The input gradient is accumulated with +=, so it must start at zero. */
  memset(input_ptr, 0, batches * input_crdi * sizeof(TYPE));
  INIT;
  /* Dilated ("up-sampled") kernel extents and SAME-style padding. */
  int kernel_cols_up = kernel_cols + (kernel_cols - 1) * (col_in_stride - 1);
  int kernel_rows_up = kernel_rows + (kernel_rows - 1) * (row_in_stride - 1);
  int kernel_dpts_up = kernel_dpts + (kernel_dpts - 1) * (dpt_in_stride - 1);
  int pc = (col_stride * (output_cols - 1) + kernel_cols_up - input_cols) / 2;
  int pr = (row_stride * (output_rows - 1) + kernel_rows_up - input_rows) / 2;
  int pd = (dpt_stride * (output_dpts - 1) + kernel_dpts_up - input_dpts) / 2;
  if (pc < 0) pc = 0;
  if (pr < 0) pr = 0;
  if (pd < 0) pd = 0;
  /* inpt2d[output_drcb x kernel_idrc] = output_grad x kernel^T */
  GEMM(CblasRowMajor, CblasNoTrans, CblasTrans,
    output_drcb, kernel_idrc, out_channel, ALPHA,
    output_ptr, out_channel, kernel_ptr, out_channel,
    BETA, inpt2d, kernel_idrc);
  /* col2im scatter.  Deliberately serial: receptive fields of different
   * output positions overlap, so parallel += on input_ptr would race. */
  for (int i = 0; i < output_drcb; ++i) {
    int bt = i / output_drc;
    int jkd = i % output_drc;
    int j = jkd / output_dr;
    int kd = jkd % output_dr;
    int k = kd / output_dpts;
    int d = kd % output_dpts;
    const int cstart = j * col_stride - pc;
    const int rstart = k * row_stride - pr;
    const int dstart = d * dpt_stride - pd;
    const int cend = cstart + kernel_cols_up;
    const int rend = rstart + kernel_rows_up;
    const int dend = dstart + kernel_dpts_up;
    const int input_idx_base = bt * input_crdi;
    int cnt = 0;
    for (int a = cstart; a < cend; a += col_in_stride) {
      for (int b = rstart; b < rend; b += row_in_stride) {
        for (int c = dstart; c < dend; c += dpt_in_stride) {
          for (int h = 0; h < in_channel; ++h) {
            /* Skip positions that fall in the zero padding. */
            if (a >= 0 && a < input_cols &&
                b >= 0 && b < input_rows &&
                c >= 0 && c < input_dpts) {
              int input_idx =
                input_idx_base + a * input_rdi + b * input_di +
                c * in_channel + h;
              input_ptr[input_idx] += inpt2d[i * kernel_idrc + cnt];
            }
            ++cnt;
          }
        }
      }
    }
  }
  free(inpt2d);
  return Val_unit;
}
/* Bytecode stub: unpack the 21-element argv from the bytecode interpreter
 * and forward to the native implementation. */
CAMLprim value FUN_BYTE (dilated_cuboid_backward_input_im2col) (value * argv, int argn) {
  return FUN_NATIVE (dilated_cuboid_backward_input_im2col) (
    argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
    argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
    argv[15], argv[16], argv[17], argv[18], argv[19], argv[20]
  );
}
#endif /* OWL_ENABLE_TEMPLATE */
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/* Opaque state for one resampling filter: the filter and windowing
   functions plus the pre-computed constants they read. */
struct _ResizeFilter
{
  MagickRealType
    (*filter)(const MagickRealType,const ResizeFilter *),
    (*window)(const MagickRealType,const ResizeFilter *),
    support,         /* filter region of support - the filter support limit */
    window_support,  /* window support, usually equal to support (expert only) */
    scale,           /* dimension scaling to fit window support (usually 1.0) */
    blur,            /* x-scale (blur-sharpen) */
    coefficient[7];  /* cubic coefficients for BC-cubic spline filters */

  size_t
    signature;       /* structure validity marker */
};
/*
  Forward declarations.
*/
static MagickRealType
I0(MagickRealType x),
BesselOrderOne(MagickRealType),
Sinc(const MagickRealType, const ResizeFilter *),
SincFast(const MagickRealType, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
%    static MagickRealType FilterName(const MagickRealType x,
%      const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
#define MagickPIL ((MagickRealType) 3.14159265358979323846264338327950288420L)
static MagickRealType Jinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Jinc function: J1(pi x)/x, the cylindrical analogue of sinc.  See
    Pratt, "Digital Image Processing", p.97, for the Jinc/Bessel
    functions, http://mathworld.wolfram.com/JincFunction.html, and page
    11 of http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf.
    Heckbert's original "zoom" program called this "Bessel", but "Jinc"
    is the accurate name.  The removable singularity at x == 0 takes the
    limit value pi/2.
  */
  if (x != 0.0)
    return(BesselOrderOne(MagickPIL*x)/x);
  return(0.5*MagickPIL);
}
static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman window: 0.42+0.5*cos(pi x)+0.08*cos(2 pi x).  Applying the
    double-angle identity cos(2t) = 2*cos(t)^2-1 reduces this to a single
    trig call and five flops, evaluated here in Horner form
    (refactoring due to Chantal Racette and Nicolas Robidoux).
  */
  const MagickRealType cosine = cos((double) (MagickPIL*x));
  return(0.34+cosine*(0.5+cosine*0.16));
}
static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman window (2nd order cosine): (1-x)*cos(pi x)+sin(pi x)/pi.
    Because the support is 1, sin(pi x) >= 0 on it and can be recovered
    from the cosine as sqrt(1-cos^2), trading the second trig call for a
    sqrt (refactoring due to Nicolas Robidoux: one trig, one sqrt, 7
    flops).
  */
  const double cosine = cos((double) (MagickPIL*x));
  const double sine = sqrt(1.0-cosine*cosine);
  return((1.0-x)*cosine+(1.0/MagickPIL)*sine);
}
static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Box filter: every sample gets the same weight.  The result is
    deliberately NOT clipped to the support: resize point sampling asks
    for weights beyond its nominal 0.0 support and relies on getting 1.0
    back.
  */
  return(1.0);
}
static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B= 1/3 C= 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B= 0   C= 1/2  Interpolatory and exact on linears
       Cubic B-Spline      B= 1   C= 0    Spline approximation of Gaussian
       Hermite             B= 0   C= 0    Spline with small support (= 1)

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values (pre-computed into
    resize_filter->coefficient[] by AcquireResizeFilter):
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter (both pieces evaluated in Horner
    form below):
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  if (x < 1.0)
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);
}
static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Gaussian with a fixed sigma = 1/2, evaluated as
    exp(-coefficient[0]*x^2) where coefficient[0] is pre-computed.  The
    normalization multiplier (coefficient[1]) is neither needed nor used
    when the Gaussian acts as a filter; only relative weights matter.
    Keeping the sigma value separate from the blur/support settings lets
    special "small sigma" gaussians be used without the filter "missing"
    pixels as the support shrinks.
  */
  const MagickRealType gamma = resize_filter->coefficient[0];
  return(exp((double) (-gamma*x*x)));
}
static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann (raised cosine) window: 0.5+0.5*cos(pi x).
  */
  const MagickRealType cosine = cos((double) (MagickPIL*x));
  return(0.5+0.5*cosine);
}
static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming window (offset cosine): 0.54+0.46*cos(pi x).
  */
  const MagickRealType cosine = cos((double) (MagickPIL*x));
  return(0.54+0.46*cosine);
}
static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Kaiser (Bessel-windowed) function.  coefficient[0] holds the free
    alpha parameter (a value from 5 to 8, default 6.5); coefficient[1]
    holds a pre-computed normalization factor (a separate scaling factor
    is unnecessary since the filter is normalized).
  */
  const MagickRealType alpha = resize_filter->coefficient[0];
  const MagickRealType normalization = resize_filter->coefficient[1];
  return(normalization*I0(alpha*sqrt((double) (1.0-x*x))));
}
static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  MagickRealType
    value;

  register ssize_t
    i;

  ssize_t
    n,
    order;

  /*
    Lagrange piecewise polynomial fit of sinc: N is the 'order' of the
    lagrange function and depends on the overall support window size of
    the filter.  That is: for a support of 2, it gives a lagrange-4
    (piecewise cubic function).

    "n" identifies the piece of the piecewise polynomial.

    See Survey: Interpolation Methods, IEEE Transactions on Medical
    Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on
    p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  order=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  /*n=(ssize_t)((1.0*order)/2.0+x);   -- which piece does x belong to */
  n = (ssize_t)(resize_filter->window_support + x);
  value=1.0f;
  /* Evaluate the Lagrange basis polynomial for piece n at x. */
  for (i=0; i < order; i++)
    if (i != n)
      value*=(n-i-x)/(n-i);
  return(value);
}
static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian, written
    with guard clauses from the outside of the support inwards.
  */
  if (x >= 1.5)
    return(0.0);
  if (x >= 0.5)
    return(0.5*(x-1.5)*(x-1.5));
  return(0.75-x*x);
}
static MagickRealType Sinc(const MagickRealType x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Scaled sinc(x) function using a trig call:
sinc(x) == sin(pi x)/(pi x).
*/
if (x != 0.0)
{
const MagickRealType pix = (MagickRealType) (MagickPIL*x);
return(sin((double) pix)/pix);
}
return((MagickRealType) 1.0);
}
static MagickRealType SincFast(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the
    interval [-4,4] constructed by Nicolas Robidoux and Chantal Racette
    with funding from the Natural Sciences and Engineering Research
    Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor
    polynomials/Pade approximants, the approximations are computed with a
    completely different technique.

    Summary: These approximations are "the best" in terms of bang
    (accuracy) for the buck (flops).  More specifically: Among the
    polynomial quotients that can be computed using a fixed number of
    flops (with a given "+ - * / budget"), the chosen polynomial quotient
    is the one closest to the approximated function with respect to
    maximum absolute relative error over the given interval.

    The Remez algorithm, as implemented in the boost library's minimax
    package, is the key to the construction:
    http://www.boost.org/doc/libs/1_36_0/libs/math/doc/...
    ...sf_and_dist/html/math_toolkit/backgrounders/remez.html

    Three coefficient sets are compiled in, one per quantum depth: deeper
    quanta need tighter approximations.
  */
  /*
    If outside of the interval of approximation, use the standard trig
    formula.
  */
  if (x > 4.0)
    {
      const MagickRealType pix = (MagickRealType) (MagickPIL*x);
      return(sin((double) pix)/pix);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      Each polynomial p (and quotient p/q) is multiplied by
      (xx-1)(xx-4)(xx-9)(xx-16), which pins the exact zeros of sinc at
      x = 1, 2, 3, 4.
    */
    const MagickRealType xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const MagickRealType c0 = 0.173610016489197553621906385078711564924e-2L;
    const MagickRealType c1 = -0.384186115075660162081071290162149315834e-3L;
    const MagickRealType c2 = 0.393684603287860108352720146121813443561e-4L;
    const MagickRealType c3 = -0.248947210682259168029030370205389323899e-5L;
    const MagickRealType c4 = 0.107791837839662283066379987646635416692e-6L;
    const MagickRealType c5 = -0.324874073895735800961260474028013982211e-8L;
    const MagickRealType c6 = 0.628155216606695311524920882748052490116e-10L;
    const MagickRealType c7 = -0.586110644039348333520104379959307242711e-12L;
    const MagickRealType p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const MagickRealType c0 = 0.173611107357320220183368594093166520811e-2L;
    const MagickRealType c1 = -0.384240921114946632192116762889211361285e-3L;
    const MagickRealType c2 = 0.394201182359318128221229891724947048771e-4L;
    const MagickRealType c3 = -0.250963301609117217660068889165550534856e-5L;
    const MagickRealType c4 = 0.111902032818095784414237782071368805120e-6L;
    const MagickRealType c5 = -0.372895101408779549368465614321137048875e-8L;
    const MagickRealType c6 = 0.957694196677572570319816780188718518330e-10L;
    const MagickRealType c7 = -0.187208577776590710853865174371617338991e-11L;
    const MagickRealType c8 = 0.253524321426864752676094495396308636823e-13L;
    const MagickRealType c9 = -0.177084805010701112639035485248501049364e-15L;
    const MagickRealType p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const MagickRealType c0 = 0.173611111110910715186413700076827593074e-2L;
    const MagickRealType c1 = -0.289105544717893415815859968653611245425e-3L;
    const MagickRealType c2 = 0.206952161241815727624413291940849294025e-4L;
    const MagickRealType c3 = -0.834446180169727178193268528095341741698e-6L;
    const MagickRealType c4 = 0.207010104171026718629622453275917944941e-7L;
    const MagickRealType c5 = -0.319724784938507108101517564300855542655e-9L;
    const MagickRealType c6 = 0.288101675249103266147006509214934493930e-11L;
    const MagickRealType c7 = -0.118218971804934245819960233886876537953e-13L;
    const MagickRealType p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const MagickRealType d0 = 1.0L;
    const MagickRealType d1 = 0.547981619622284827495856984100563583948e-1L;
    const MagickRealType d2 = 0.134226268835357312626304688047086921806e-2L;
    const MagickRealType d3 = 0.178994697503371051002463656833597608689e-4L;
    const MagickRealType d4 = 0.114633394140438168641246022557689759090e-6L;
    const MagickRealType q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    1st order (linear) B-Spline: the tent kernel used for bilinear
    interpolation (a Bartlett 2D cone filter), also usable as a Bartlett
    windowing function for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welch parabolic windowing filter: 1-x^2 inside the unit support,
    zero outside.
  */
  if (x >= 1.0)
    return(0.0);
  return(1.0-x*x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Cubic Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Hanning Hamming
% Kaiser Lanczos
%
% Special purpose Filters
% SincFast LanczosSharp Lanczos2D Lanczos2DSharp Robidoux
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special a 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for images distortions.
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of both Mitchell and Lanczos2Sharp.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisable. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied to
% the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic type of filter.
% If only one of these are given it is assumes to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterTypes filter_type, const MagickBooleanType radial,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterTypes filter,const MagickRealType blur,
  const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterTypes
    filter_type,
    window_type;

  MagickRealType
    B,
    C,
    value;

  register ResizeFilter
    *resize_filter;

  /*
    Table mapping a given Filter onto its Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.  A
    'SincFast' filter function could be upgraded to a 'Jinc' filter if
    "cylindrical", unless a 'Sinc' or 'SincFast' filter was specifically
    requested.

    WARNING: The order of this table must match the order of the FilterTypes
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.

    You can check filter setups with the "filter:verbose" setting.
  */
  static struct
  {
    FilterTypes
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter,     BoxFilter },      /* Undefined (default to Box)  */
    { PointFilter,         BoxFilter },      /* SPECIAL: Nearest neighbour  */
    { BoxFilter,           BoxFilter },      /* Box averaging filter        */
    { TriangleFilter,      BoxFilter },      /* Linear interpolation filter */
    { HermiteFilter,       BoxFilter },      /* Hermite interpolation filter */
    { SincFastFilter,      HanningFilter },  /* Hanning -- cosine-sinc      */
    { SincFastFilter,      HammingFilter },  /* Hamming -- '' variation     */
    { SincFastFilter,      BlackmanFilter }, /* Blackman -- 2*cosine-sinc   */
    { GaussianFilter,      BoxFilter },      /* Gaussian blur filter        */
    { QuadraticFilter,     BoxFilter },      /* Quadratic Gaussian approx   */
    { CubicFilter,         BoxFilter },      /* Cubic B-Spline              */
    { CatromFilter,        BoxFilter },      /* Cubic-Keys interpolator     */
    { MitchellFilter,      BoxFilter },      /* 'Ideal' Cubic-Keys filter   */
    { JincFilter,          BoxFilter },      /* Raw 3-lobed Jinc function   */
    { SincFilter,          BoxFilter },      /* Raw 4-lobed Sinc function   */
    { SincFastFilter,      BoxFilter },      /* Raw fast sinc ("Pade"-type) */
    { SincFastFilter,      KaiserFilter },   /* Kaiser -- square root-sinc  */
    { SincFastFilter,      WelshFilter },    /* Welsh -- parabolic-sinc     */
    { SincFastFilter,      CubicFilter },    /* Parzen -- cubic-sinc        */
    { SincFastFilter,      BohmanFilter },   /* Bohman -- 2*cosine-sinc     */
    { SincFastFilter,      TriangleFilter }, /* Bartlett -- triangle-sinc   */
    { LagrangeFilter,      BoxFilter },      /* Lagrange self-windowing     */
    { LanczosFilter,       LanczosFilter },  /* Lanczos Sinc-Sinc filters   */
    { LanczosSharpFilter,  LanczosSharpFilter }, /* | these require         */
    { Lanczos2Filter,      Lanczos2Filter },     /* | special handling      */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter,      BoxFilter },      /* Cubic Keys tuned for EWA    */
  };
  /*
    Table mapping the filter/window from the above table to an actual
    function.  The default support size for that filter as a weighting
    function, the range to scale with to use that function as a sinc
    windowing function, (typ 1.0).

    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.

    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    MagickRealType
      (*function)(const MagickRealType, const ResizeFilter*),
      lobes,  /* Default lobes/support size of the weighting filter. */
      scale,  /* Support when function used as a windowing function
                 Typically equal to the location of the first zero crossing. */
      B,C;    /* BC-spline coefficients, ignored if not a CubicBC filter. */
  } const filters[SentinelFilter] =
  {
    { Box,       0.5, 0.5, 0.0, 0.0 },  /* Undefined (default to Box)  */
    { Box,       0.0, 0.5, 0.0, 0.0 },  /* Point (special handling)    */
    { Box,       0.5, 0.5, 0.0, 0.0 },  /* Box                         */
    { Triangle,  1.0, 1.0, 0.0, 0.0 },  /* Triangle                    */
    { CubicBC,   1.0, 1.0, 0.0, 0.0 },  /* Hermite (cubic  B=C=0)      */
    { Hanning,   1.0, 1.0, 0.0, 0.0 },  /* Hanning, cosine window      */
    { Hamming,   1.0, 1.0, 0.0, 0.0 },  /* Hamming, '' variation       */
    { Blackman,  1.0, 1.0, 0.0, 0.0 },  /* Blackman, 2*cosine window   */
    { Gaussian,  2.0, 1.5, 0.0, 0.0 },  /* Gaussian                    */
    { Quadratic, 1.5, 1.5, 0.0, 0.0 },  /* Quadratic gaussian          */
    { CubicBC,   2.0, 2.0, 1.0, 0.0 },  /* Cubic B-Spline (B=1,C=0)    */
    { CubicBC,   2.0, 1.0, 0.0, 0.5 },  /* Catmull-Rom    (B=0,C=1/2)  */
    { CubicBC,   2.0, 8.0/7.0, 1./3., 1./3. }, /* Mitchell   (B=C=1/3) */
    { Jinc,      3.0, 1.2196698912665045, 0.0, 0.0 }, /* Raw 3-lobed Jinc */
    { Sinc,      4.0, 1.0, 0.0, 0.0 },  /* Raw 4-lobed Sinc            */
    { SincFast,  4.0, 1.0, 0.0, 0.0 },  /* Raw fast sinc ("Pade"-type) */
    { Kaiser,    1.0, 1.0, 0.0, 0.0 },  /* Kaiser (square root window) */
    { Welsh,     1.0, 1.0, 0.0, 0.0 },  /* Welsh (parabolic window)    */
    { CubicBC,   2.0, 2.0, 1.0, 0.0 },  /* Parzen (B-Spline window)    */
    { Bohman,    1.0, 1.0, 0.0, 0.0 },  /* Bohman, 2*Cosine window     */
    { Triangle,  1.0, 1.0, 0.0, 0.0 },  /* Bartlett (triangle window)  */
    { Lagrange,  2.0, 1.0, 0.0, 0.0 },  /* Lagrange sinc approximation */
    { SincFast,  3.0, 1.0, 0.0, 0.0 },  /* Lanczos, 3-lobed Sinc-Sinc  */
    { SincFast,  3.0, 1.0, 0.0, 0.0 },  /* lanczos, Sharpened          */
    { SincFast,  2.0, 1.0, 0.0, 0.0 },  /* Lanczos, 2-lobed            */
    { SincFast,  2.0, 1.0, 0.0, 0.0 },  /* Lanczos2, sharpened         */
    { CubicBC,   2.0, 1.1685777620836932,
                      0.37821575509399867, 0.31089212245300067 }
                 /* Robidoux: Keys cubic close to Lanczos2D sharpened */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter.  It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table.  This
    way users do not have to deal with the highly irrational lobe sizes of
    the Jinc filter.

    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static MagickRealType
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.244759868719957,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.2475085630373,
      16.247661874700962
    };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
  if (resize_filter == (ResizeFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=blur;   /* function argument blur factor */
  /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
  if (cylindrical != MagickFalse && filter_type == SincFastFilter
       && filter != SincFastFilter )
    filter_type=JincFilter;
  /* Expert filter setting override */
  artifact=GetImageArtifact(image,"filter:filter");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        { /* Raw filter request - no window function. */
          filter_type=(FilterTypes) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterTypes) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,
            artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type=cylindrical != MagickFalse ?
                JincFilter : SincFastFilter;
              window_type=(FilterTypes) option;
            }
        }
    }
  /* Assign the real functions to use for the filters selected. */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].lobes;
  resize_filter->window=filters[window_type].function;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickSignature;
  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(MagickRealType) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur*=0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur*=0.9549963639785485;
      break;
    default:
      break;
  }
  /*
    ** Other Expert Option Modifications
  */
  /* User Gaussian Sigma Override - no support change */
  value=0.5;    /* gaussian sigma default, half pixel */
  /*
    BUG FIX: the original tested 'if ( GaussianFilter )' and
    'if ( KaiserFilter )' -- non-zero enum constants, so BOTH branches
    always executed and the Kaiser settings unconditionally overwrote the
    Gaussian coefficients.  Compare against the selected filter/window
    types instead.
  */
  if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
    {
      artifact=GetImageArtifact(image,"filter:sigma");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      /* Define coefficients for Gaussian */
      resize_filter->coefficient[0]=1.0/(2.0*value*value); /* X scaling */
      resize_filter->coefficient[1]=(MagickRealType) (1.0/(Magick2PI*value*
        value)); /* normalization */
    }
  /* User Kaiser Alpha Override - no support change */
  if ((filter_type == KaiserFilter) || (window_type == KaiserFilter))
    {
      value=6.5; /* default alpha value for Kaiser bessel windowing function */
      artifact=GetImageArtifact(image,"filter:alpha");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      /* Define coefficients for Kaiser Windowing Function */
      resize_filter->coefficient[0]=value;         /* X scaling */
      resize_filter->coefficient[1]=1.0/I0(value); /* normalization */
    }
  /* Blur Override */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(MagickRealType) MagickEpsilon;
  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(MagickRealType) lobes;
    }
  /* Convert a Jinc function lobes value to a real support value */
  if (resize_filter->filter == Jinc)
    {
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
    }
  /* expert override of the support setting */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping'
    window that calling operator is planning to actually use.  (Expert
    override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for
    weighting function.  This avoids a division on every filter call.
  */
  resize_filter->scale/=resize_filter->window_support;
  /*
    Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((filters[filter_type].function == CubicBC) ||
      (filters[window_type].function == CubicBC))
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c"); /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
            }
        }
      /* Convert B,C values into Cubic Coefficients.  See CubicBC(). */
      {
        const double twoB = B+B;

        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }
  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    artifact=GetImageArtifact(image,"filter:verbose");
    if (IsMagickTrue(artifact))
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting
          function may not exactly match the filter of the same name.
          EG: a Point filter really uses a Box weighting function
          with a different support than is typically used.
        */
        if (resize_filter->filter == Box)      filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)     filter_type=SincFilter;
        if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)     filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)  filter_type=CubicFilter;
        if (resize_filter->window == Box)      window_type=BoxFilter;
        if (resize_filter->window == Sinc)     window_type=SincFilter;
        if (resize_filter->window == SincFast) window_type=SincFastFilter;
        if (resize_filter->window == Jinc)     window_type=JincFilter;
        if (resize_filter->window == CubicBC)  window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter); /* practical_support */
        (void) FormatLocaleFile(stdout,"# Resize Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions, window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# win-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale_blur = %.*g\n",
          GetMagickPrecision(), (double)resize_filter->blur);
        if ( filter_type == GaussianFilter )
          (void) FormatLocaleFile(stdout,"# gaussian_sigma = %.*g\n",
            GetMagickPrecision(), (double)value);
        if ( filter_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser_alpha = %.*g\n",
            GetMagickPrecision(), (double)value);
        (void) FormatLocaleFile(stdout,"# practical_support = %.*g\n",
          GetMagickPrecision(), (double)support);
        if ( filter_type == CubicFilter || window_type == CubicFilter )
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing
          filter result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),
            (double) GetResizeFilterWeight(resize_filter,x));
        /* A final value so gnuplot can graph the 'stop' properly. */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define AdaptiveResizeImageTag "Resize/Image"

  CacheView
    *image_view,     /* read-only view of the source image */
    *resize_view;    /* writable view of the destination image */

  Image
    *resize_image;

  MagickBooleanType
    status;          /* MagickFalse once any row fails; remaining rows bail */

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Adaptively resize image: every destination pixel is produced by mesh
    interpolation of the source at the corresponding sub-pixel offset,
    rather than by convolution with a resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Degenerate geometry: nothing to resize to. */
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Same geometry: a plain clone suffices. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  resize_view=AcquireCacheView(resize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status) omp_throttle(1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    PointInfo
      offset;        /* source-space coordinates for this destination pixel */

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      continue;
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    /* Map the destination row centre (y+0.5) back into source coordinates. */
    offset.y=((MagickRealType) (y+0.5)*image->rows/resize_image->rows);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      offset.x=((MagickRealType) (x+0.5)*image->columns/resize_image->columns);
      (void) InterpolateMagickPixelPacket(image,image_view,
        MeshInterpolatePixel,offset.x-0.5,offset.y-0.5,&pixel,exception);
      SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      continue;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AdaptiveResizeImage)
#endif
        /* NOTE(review): progress increments once per resize_image row but
           the span passed is image->rows -- these differ when the geometry
           changes; confirm whether resize_image->rows was intended. */
        proceed=SetImageProgress(image,AdaptiveResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*J1(x), where J1() is a rational polynomial approximation;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% MagickRealType BesselOrderOne(MagickRealType x)
%
% A description of each parameter follows:
%
% o x: MagickRealType value.
%
*/
#undef I0
static MagickRealType I0(MagickRealType x)
{
  /*
    Zeroth order modified Bessel function of the first kind, evaluated by
    summing the power series term-by-term until the next term drops below
    MagickEpsilon.
  */
  MagickRealType
    quarter_x_squared,
    sum,
    term;

  register ssize_t
    k;

  quarter_x_squared=x*x/4.0;
  sum=1.0;
  term=quarter_x_squared;
  for (k=2; term > MagickEpsilon; k++)
  {
    sum+=term;
    term*=quarter_x_squared/((MagickRealType) k*k);
  }
  return(sum);
}
#undef J1
static MagickRealType J1(MagickRealType x)
{
  /*
    Rational polynomial approximation used by BesselOrderOne() on the
    small-argument branch: j1(x) ~= x*J1(x) for x in (0,8].  Numerator and
    denominator are evaluated by Horner's rule in powers of x*x.
  */
  MagickRealType
    numerator,
    denominator;

  register ssize_t
    k;

  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  numerator=Pone[8];
  denominator=Qone[8];
  for (k=7; k >= 0; k--)
  {
    numerator=numerator*x*x+Pone[k];
    denominator=denominator*x*x+Qone[k];
  }
  return(numerator/denominator);
}
#undef P1
static MagickRealType P1(MagickRealType x)
{
  /*
    P-polynomial of the asymptotic expansion used by BesselOrderOne() for
    x in (8,inf); both polynomials are evaluated by Horner's rule in
    powers of (8/x)^2.
  */
  MagickRealType
    numerator,
    denominator;

  register ssize_t
    k;

  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  /*
    Q-polynomial of the asymptotic expansion used by BesselOrderOne() for
    x in (8,inf); both polynomials are evaluated by Horner's rule in
    powers of (8/x)^2.
  */
  MagickRealType
    numerator,
    denominator;

  register ssize_t
    k;

  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
static MagickRealType BesselOrderOne(MagickRealType x)
{
  /*
    Bessel function of the first kind of order one, j1(x).  The small-|x|
    branch uses the rational approximation x*J1(x); the large-|x| branch
    uses the asymptotic form built from P1() and Q1().  Oddness
    (j1(-x) = -j1(x)) is restored from the sign of the original argument.
  */
  MagickRealType
    signed_x,
    value;

  if (x == 0.0)
    return(0.0);
  signed_x=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(signed_x*J1(x));
  value=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double)
    x)-cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (signed_x < 0.0)
    value=(-value);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature and release the filter; returns NULL for
    caller reassignment.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  resize_filter->signature=(~MagickSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  /*
    The practical support window is the base support scaled by the
    (possibly expert-overridden) blur factor.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  return(resize_filter->blur*resize_filter->support);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const MagickRealType x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
/*
  GetResizeFilterWeight() evaluates the resize filter at point x and
  returns its weight: the windowing function (when one applies) times the
  filtering function, both evaluated at |x| scaled by the blur factor.
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  MagickRealType
    blurred_x,
    window;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* X offset with blur scaling. */
  blurred_x=fabs((double) x)/resize_filter->blur;
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window=1.0;  /* Point or Box filter -- avoid division by zero. */
  else
    window=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() is a convenience method that scales an image proportionally
% to twice its size.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MagnifyImage() is a convenience wrapper around ResizeImage() that scales
  an image to exactly twice its width and height with the cubic filter.
  Returns the new image, or NULL on failure (errors in `exception').
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,2*image->columns,2*image->rows,CubicFilter,1.0,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally
% to half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MinifyImage() is a convenience wrapper around ResizeImage() that scales
  an image to half its width and height with the cubic filter.  Returns
  the new image, or NULL on failure (errors in `exception').
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,CubicFilter,1.0,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
/*
  ResampleImage() resizes the image so that, displayed at the requested
  resolution, it covers the same physical area as the original did at its
  own resolution.  A source density of 0 is treated as 72 DPI.  Returns
  the resized image (with its resolution fields updated) or NULL.
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    source_x_density,
    source_y_density;

  Image
    *resample_image;

  size_t
    resample_height,
    resample_width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Convert physical size at the new resolution into a pixel geometry,
    rounding to the nearest pixel; fall back to 72 DPI for unset density.
  */
  source_x_density=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  source_y_density=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  resample_width=(size_t) (x_resolution*image->columns/source_x_density+0.5);
  resample_height=(size_t) (y_resolution*image->rows/source_y_density+0.5);
  resample_image=ResizeImage(image,resample_width,resample_height,filter,blur,
    exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,
% const size_t columns,const size_t rows,
% const double delta_x,const double rigidity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  LiquidRescaleImage() rescales the image with seam carving via the liblqr
  delegate.  Degenerate geometries fall back to ResizeImage(); oversize
  targets are handled by pre-resizing (liblqr cannot grow beyond ~2x) and
  recursing.  Returns the rescaled image or NULL on failure.

  Fix: the alpha-channel tests were inverted (`== MagickFalse`).  The
  channel map must include 'A' -- and the carved opacity must be read back
  -- only when the image HAS a matte channel (image->matte != MagickFalse);
  otherwise an opaque RGB image exported only 3 bytes per pixel while the
  reader indexed a 4th, and images with alpha silently lost it.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *rescale_view;

  const char
    *map;

  guchar
    *packet;

  Image
    *rescale_image;

  int
    x,
    y;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  unsigned char
    *pixels;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
  if ((columns >= (2*image->columns)) || (rows >= (2*image->rows)))
    {
      Image
        *resize_image;

      size_t
        height,
        width;

      /*
        Honor liquid resize size limitations: pre-resize by powers of two
        until the target is within reach, then recurse.
      */
      for (width=image->columns; columns >= (2*width-1); width*=2);
      for (height=image->rows; rows >= (2*height-1); height*=2);
      resize_image=ResizeImage(image,width,height,image->filter,image->blur,
        exception);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x,
        rigidity,exception);
      resize_image=DestroyImage(resize_image);
      return(rescale_image);
    }
  /*
    Select the export channel map; append alpha only when the image
    actually carries a matte channel.
  */
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows*
    strlen(map)*sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    return((Image *) NULL);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* The carver takes ownership of `pixels'; it is freed with the carver. */
  carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,columns,rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&rescale_image->exception);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result back into the rescaled image, one pixel at a
    time (liblqr delivers pixels in its own order).
  */
  GetMagickPixelPacket(rescale_image,&pixel);
  (void) lqr_carver_scan_reset(carver);
  rescale_view=AcquireCacheView(rescale_image);
  while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
  {
    register IndexPacket
      *restrict rescale_indexes;

    register PixelPacket
      *restrict q;

    q=QueueCacheViewAuthenticPixels(rescale_view,x,y,1,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    rescale_indexes=GetCacheViewAuthenticIndexQueue(rescale_view);
    pixel.red=QuantumRange*(packet[0]/255.0);
    pixel.green=QuantumRange*(packet[1]/255.0);
    pixel.blue=QuantumRange*(packet[2]/255.0);
    if (image->colorspace != CMYKColorspace)
      {
        /* Alpha is exported only for matte images (map "RGBA"). */
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange*(packet[3]/255.0);
      }
    else
      {
        pixel.index=QuantumRange*(packet[3]/255.0);
        /* Alpha is exported only for matte images (map "CMYKA"). */
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange*(packet[4]/255.0);
      }
    SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  /*
    Relinquish resources.
  */
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  LiquidRescaleImage() stub used when ImageMagick is built without the
  liblqr delegate: it raises a MissingDelegateError and returns NULL.
  All geometry parameters are unused (hence magick_unused()).
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Report that seam-carving support was not compiled in. */
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged.  Otherwise the filter defaults to Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,
% const size_t rows,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp. Typically set
% this to 1.0.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One filter-kernel sample: `weight' is the filter weight applied to
  source pixel/row/column index `pixel' when accumulating one destination
  pixel in HorizontalFilter()/VerticalFilter().
*/
typedef struct _ContributionInfo
{
  MagickRealType
    weight;

  ssize_t
    pixel;
} ContributionInfo;
/*
  DestroyContributionThreadSet() frees the per-thread contribution
  buffers, then the pointer table itself; always returns NULL.  NULL
  slots (from a partially failed acquire) are skipped.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    thread;

  assert(contribution != (ContributionInfo **) NULL);
  for (thread=0; thread < (ssize_t) GetOpenMPMaximumThreads(); thread++)
    if (contribution[thread] != (ContributionInfo *) NULL)
      contribution[thread]=(ContributionInfo *) RelinquishMagickMemory(
        contribution[thread]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
register ssize_t
i;
ContributionInfo
**contribution;
size_t
number_threads;
number_threads=GetOpenMPMaximumThreads();
contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
sizeof(*contribution));
if (contribution == (ContributionInfo **) NULL)
return((ContributionInfo **) NULL);
(void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
for (i=0; i < (ssize_t) number_threads; i++)
{
contribution[i]=(ContributionInfo *) AcquireQuantumMemory(count,
sizeof(**contribution));
if (contribution[i] == (ContributionInfo *) NULL)
return(DestroyContributionThreadSet(contribution));
}
return(contribution);
}
/* MagickMax() returns the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  return(x > y ? x : y);
}
/* MagickMin() returns the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  HorizontalFilter() resizes in the horizontal direction only: each column
  of resize_image is a weighted average of the source columns falling under
  the scaled filter support window.  `span' and `offset' drive the shared
  progress monitor across the two separable passes.  Returns MagickTrue on
  success; on failure an exception is raised and MagickFalse returned.
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
    When shrinking (x_factor < 1) the support window is widened by 1/x_factor.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  /* Any support beyond 0.5 blends neighboring pixels: DirectClass needed. */
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=1.0/scale;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  resize_view=AcquireCacheView(resize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for shared(status)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    MagickRealType
      center,
      density;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ContributionInfo
      *restrict contribution;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* Center of the filter window in source column coordinates. */
    center=(MagickRealType) (x+0.5)/x_factor;
    start=(ssize_t) MagickMax(center-support+0.5,0.0);
    stop=(ssize_t) MagickMin(center+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[GetOpenMPThreadId()];
    /* Sample the filter once per contributing source column. */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-center+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=1.0/density;
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register ssize_t
        i;

      ssize_t
        j;

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          /* No alpha channel: plain weighted accumulation. */
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight;
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=alpha*GetPixelOpacity(p+j);
          }
          SetPixelRed(q,ClampToQuantum(pixel.red));
          SetPixelGreen(q,ClampToQuantum(pixel.green));
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight;
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+y,ClampToQuantum(
                pixel.index));
            }
        }
      else
        {
          MagickRealType
            gamma;

          /*
            Alpha-weighted accumulation: colors are weighted by alpha as
            well, then renormalized by the total alpha (gamma) so that
            transparent source pixels do not bleed color.
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[i].pixel-contribution[0].pixel);
            alpha=contribution[i].weight*QuantumScale*
              GetPixelAlpha(p+j);
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
            gamma+=alpha;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                  (contribution[i].pixel-contribution[0].pixel);
                alpha=contribution[i].weight*QuantumScale*
                  GetPixelAlpha(p+j);
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+y,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /* Colormapped output: copy the index of the nearest source pixel. */
          i=(ssize_t) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[i-start].pixel-contribution[0].pixel);
          SetPixelIndex(resize_indexes+y,GetPixelIndex(
            indexes+j));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() is the row-direction twin of HorizontalFilter(): each
  row of resize_image is a weighted average of the source rows falling
  under the scaled filter support window.  Returns MagickTrue on success;
  on failure an exception is raised and MagickFalse returned.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **restrict contributions;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
    When shrinking (y_factor < 1) the support window is widened by 1/y_factor.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  /* Any support beyond 0.5 blends neighboring pixels: DirectClass needed. */
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=1.0/scale;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  resize_view=AcquireCacheView(resize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for shared(status)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    MagickRealType
      center,
      density;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ContributionInfo
      *restrict contribution;

    register IndexPacket
      *restrict resize_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* Center of the filter window in source row coordinates. */
    center=(MagickRealType) (y+0.5)/y_factor;
    start=(ssize_t) MagickMax(center-support+0.5,0.0);
    stop=(ssize_t) MagickMin(center+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[GetOpenMPThreadId()];
    /* Sample the filter once per contributing source row. */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-center+0.5));
      density+=contribution[n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=1.0/density;
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register ssize_t
        i;

      ssize_t
        j;

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          /* No alpha channel: plain weighted accumulation. */
          for (i=0; i < n; i++)
          {
            j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight;
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=alpha*GetPixelOpacity(p+j);
          }
          SetPixelRed(q,ClampToQuantum(pixel.red));
          SetPixelGreen(q,ClampToQuantum(pixel.green));
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight;
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+x,ClampToQuantum(
                pixel.index));
            }
        }
      else
        {
          MagickRealType
            gamma;

          /*
            Alpha-weighted accumulation: colors are weighted by alpha as
            well, then renormalized by the total alpha (gamma) so that
            transparent source pixels do not bleed color.
          */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
              image->columns+x);
            alpha=contribution[i].weight*QuantumScale*
              GetPixelAlpha(p+j);
            pixel.red+=alpha*GetPixelRed(p+j);
            pixel.green+=alpha*GetPixelGreen(p+j);
            pixel.blue+=alpha*GetPixelBlue(p+j);
            pixel.opacity+=contribution[i].weight*GetPixelOpacity(p+j);
            gamma+=alpha;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(ssize_t) ((contribution[i].pixel-contribution[0].pixel)*
                  image->columns+x);
                alpha=contribution[i].weight*QuantumScale*
                  GetPixelAlpha(p+j);
                pixel.index+=alpha*GetPixelIndex(indexes+j);
              }
              SetPixelIndex(resize_indexes+x,ClampToQuantum(gamma*
                pixel.index));
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /* Colormapped output: copy the index of the nearest source pixel. */
          i=(ssize_t) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=(ssize_t) ((contribution[i-start].pixel-contribution[0].pixel)*
            image->columns+x);
          SetPixelIndex(resize_indexes+x,
            GetPixelIndex(indexes+j));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_VerticalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  ResizeImage() scales the image to columns x rows with the requested
  filter and blur factor, performing the resize as two separable passes
  (horizontal then vertical, or the reverse) in whichever order is
  cheaper for the given scale factors.  When `filter' is UndefinedFilter
  a sensible default is chosen.  Returns the new image or NULL.
*/
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define WorkLoadFactor 0.265

  FilterTypes
    selected_filter;

  Image
    *intermediate_image,
    *resize_image;

  MagickOffsetType
    progress_offset;

  MagickRealType
    horizontal_factor,
    vertical_factor;

  MagickSizeType
    progress_span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter) && (blur == 1.0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return(resize_image);
  horizontal_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  vertical_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  /*
    The intermediate image holds the result of the first (cheaper) pass.
  */
  if ((horizontal_factor*vertical_factor) > WorkLoadFactor)
    intermediate_image=CloneImage(image,columns,image->rows,MagickTrue,
      exception);
  else
    intermediate_image=CloneImage(image,image->columns,rows,MagickTrue,
      exception);
  if (intermediate_image == (Image *) NULL)
    return(DestroyImage(resize_image));
  /*
    Select a default filter when the caller leaves it undefined: Point for
    a no-op geometry, Mitchell for colormapped/matte/enlarging resizes,
    Lanczos otherwise.
  */
  if (filter != UndefinedFilter)
    selected_filter=filter;
  else
    if ((horizontal_factor == 1.0) && (vertical_factor == 1.0))
      selected_filter=PointFilter;
    else
      selected_filter=((image->storage_class == PseudoClass) ||
        (image->matte != MagickFalse) ||
        ((horizontal_factor*vertical_factor) > 1.0)) ? MitchellFilter :
        LanczosFilter;
  resize_filter=AcquireResizeFilter(image,selected_filter,blur,MagickFalse,
    exception);
  /*
    Resize image: two separable passes sharing one progress span.
  */
  progress_offset=0;
  if ((horizontal_factor*vertical_factor) > WorkLoadFactor)
    {
      progress_span=(MagickSizeType) (intermediate_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,intermediate_image,
        horizontal_factor,progress_span,&progress_offset,exception);
      status&=VerticalFilter(resize_filter,intermediate_image,resize_image,
        vertical_factor,progress_span,&progress_offset,exception);
    }
  else
    {
      progress_span=(MagickSizeType) (intermediate_image->rows+columns);
      status=VerticalFilter(resize_filter,image,intermediate_image,
        vertical_factor,progress_span,&progress_offset,exception);
      status&=HorizontalFilter(resize_filter,intermediate_image,resize_image,
        horizontal_factor,progress_span,&progress_offset,exception);
    }
  /*
    Free resources.
  */
  intermediate_image=DestroyImage(intermediate_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales the image to the requested geometry with pure
  point sampling: no new colors are introduced.  Column offsets are
  precomputed once; each destination row then copies pixels (and, for
  PseudoClass/CMYK images, colormap indexes) from its nearest source row.
  Returns the sampled image or NULL on failure.

  Fix: the source row is fetched with GetCacheViewVirtualPixels(), so its
  colormap indexes must come from GetCacheViewVirtualIndexQueue(); the
  authentic index queue is only valid after an authentic pixel get (it is
  what the sibling HorizontalFilter()/VerticalFilter() use for their
  source views as well).
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x;

  ssize_t
    *x_offset,
    y;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Map each destination column to its nearest source column, once. */
  for (x=0; x < (ssize_t) sample_image->columns; x++)
    x_offset[x]=(ssize_t) (((MagickRealType) x+0.5)*image->columns/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  sample_view=AcquireCacheView(sample_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict sample_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    y_offset=(ssize_t) (((MagickRealType) y+0.5)*image->rows/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    sample_indexes=GetCacheViewAuthenticIndexQueue(sample_view);
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
      *q++=p[x_offset[x]];
    if ((image->storage_class == PseudoClass) ||
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) sample_image->columns; x++)
        SetPixelIndex(sample_indexes+x,
          GetPixelIndex(indexes+x_offset[x]));
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SampleImage)
#endif
        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  MagickRealType
    alpha;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row count is unchanged, scanline simply
    aliases x_vector and no separate buffer is acquired.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
      image->columns,sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      /*
        Release whichever buffers were acquired before the failure;
        previously they leaked.  scanline is freed only when it does not
        alias x_vector.
      */
      if ((scanline != (MagickPixelPacket *) NULL) && (scanline != x_vector))
        scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (MagickPixelPacket *) NULL)
        scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(
          scale_scanline);
      if (x_vector != (MagickPixelPacket *) NULL)
        x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
      if (y_vector != (MagickPixelPacket *) NULL)
        y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image:  accumulate source rows into y_vector until one full
    destination row's worth of coverage (span.y) has been consumed.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  image_view=AcquireCacheView(image);
  scale_view=AcquireCacheView(scale_image);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict scale_indexes;

    register MagickPixelPacket
      *restrict s,
      *restrict t;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    alpha=1.0;
    scale_indexes=GetCacheViewAuthenticIndexQueue(scale_view);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline (rows unchanged: 1:1 vertical mapping).
          Channels are premultiplied by alpha when a matte is present.
        */
        p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
          exception);
        if (p == (const PixelPacket *) NULL)
          break;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (image->matte != MagickFalse)
            alpha=QuantumScale*GetPixelAlpha(p);
          x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
          x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
          x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) (alpha*GetPixelIndex(indexes+x));
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
                exception);
              if (p == (const PixelPacket *) NULL)
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (image->matte != MagickFalse)
                  alpha=QuantumScale*GetPixelAlpha(p);
                x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
                x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
                x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) (alpha*
                    GetPixelIndex(indexes+x));
                p++;
              }
              number_rows++;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,i++,image->columns,1,
              exception);
            if (p == (const PixelPacket *) NULL)
              break;
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (image->matte != MagickFalse)
                alpha=QuantumScale*GetPixelAlpha(p);
              x_vector[x].red=(MagickRealType) (alpha*GetPixelRed(p));
              x_vector[x].green=(MagickRealType) (alpha*GetPixelGreen(p));
              x_vector[x].blue=(MagickRealType) (alpha*GetPixelBlue(p));
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) GetPixelOpacity(p);
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) (alpha*
                  GetPixelIndex(indexes+x));
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the accumulated row, then reset y_vector for the next one.
        */
        s=scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image (no horizontal resampling).
        */
        s=scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-s->opacity);
          alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha);
          SetPixelRed(q,ClampToQuantum(alpha*s->red));
          SetPixelGreen(q,ClampToQuantum(alpha*s->green));
          SetPixelBlue(q,ClampToQuantum(alpha*s->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(s->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*s->index));
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((ssize_t) (t-scale_scanline) < (ssize_t) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (scale_image->matte != MagickFalse)
            alpha=QuantumScale*(QuantumRange-t->opacity);
          alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha);
          SetPixelRed(q,ClampToQuantum(alpha*t->red));
          SetPixelGreen(q,ClampToQuantum(alpha*t->green));
          SetPixelBlue(q,ClampToQuantum(alpha*t->blue));
          if (scale_image->matte != MagickFalse)
            SetPixelOpacity(q,ClampToQuantum(t->opacity));
          if (scale_indexes != (IndexPacket *) NULL)
            SetPixelIndex(scale_indexes+x,ClampToQuantum(alpha*t->index));
          t++;
          q++;
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    value[MaxTextExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  size_t
    version;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    For large reductions, sample first (cheap) and only then resize
    (high quality); otherwise resize directly.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,image->blur,
      exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,
        image->blur,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          image->blur,exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Record freedesktop.org Thumbnail Managing Standard properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MaxTextExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      /*
        attributes is valid only inside this branch; a second unconditional
        use of attributes.st_mtime (previously here) read an uninitialized
        struct stat and its result was immediately overwritten -- removed.
      */
      (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,value);
  (void) ConcatenateMagickString(value,"B",MaxTextExtent);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatLocaleString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    image->magick_rows);
  /*
    The spec key is "Thumb::Image::Height"; it was previously emitted with a
    lowercase 'h', inconsistent with Thumb::Image::Width.
  */
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
|
displacement_lagrangemultiplier_mixed_contact_criteria.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
#include "utilities/constraint_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierMixedContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierMixedContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierMixedContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
/// The definition of the current class
typedef DisplacementLagrangeMultiplierMixedContactCriteria< TSparseSpace, TDenseSpace > ClassType;
/// The dofs array type
typedef typename BaseType::DofsArrayType DofsArrayType;
/// The sparse matrix type
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// The dense vector type
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The epsilon tolerance definition
static constexpr double Tolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor.
*/
explicit DisplacementLagrangeMultiplierMixedContactCriteria()
    : BaseType()
{
    // Tolerances/flags are left at their member defaults; callers are
    // expected to configure them separately (e.g. via AssignSettings).
}
/**
* @brief Default constructor. (with parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementLagrangeMultiplierMixedContactCriteria(Kratos::Parameters ThisParameters)
    : BaseType()
{
    // Validate and assign defaults: merge the user-provided parameters with
    // GetDefaultParameters(), then configure this instance from the result.
    ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param RotRatioTolerance Relative tolerance for rotation residual error
* @param RotAbsTolerance Absolute tolerance for rotation residual error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementLagrangeMultiplierMixedContactCriteria(
    const double DispRatioTolerance,
    const double DispAbsTolerance,
    const double RotRatioTolerance,
    const double RotAbsTolerance,
    const double LMRatioTolerance,
    const double LMAbsTolerance,
    const bool EnsureContact = false,
    const bool PrintingOutput = false
    )
    : BaseType()
{
    // Displacement residual tolerances
    mDispRatioTolerance = DispRatioTolerance;
    mDispAbsTolerance = DispAbsTolerance;

    // Rotation residual tolerances
    mRotRatioTolerance = RotRatioTolerance;
    mRotAbsTolerance = RotAbsTolerance;

    // Lagrange-multiplier (contact) tolerances
    mLMRatioTolerance = LMRatioTolerance;
    mLMAbsTolerance = LMAbsTolerance;

    // Configure the local flags: contact-loss checking and output mode are
    // caller-driven; the remaining state flags start cleared and are set
    // later during Initialize()/InitializeSolutionStep().
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, EnsureContact);
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, PrintingOutput);
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
//* Copy constructor.
// Copy constructor: replicates flags, tolerances and the cached residual
// norms.  NOTE(review): the rotation/LM current-residual members not listed
// here rely on their in-class state; confirm none are missed if members are
// added later.
DisplacementLagrangeMultiplierMixedContactCriteria( DisplacementLagrangeMultiplierMixedContactCriteria const& rOther )
    :BaseType(rOther)
    ,mOptions(rOther.mOptions)
    ,mDispRatioTolerance(rOther.mDispRatioTolerance)
    ,mDispAbsTolerance(rOther.mDispAbsTolerance)
    ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
    ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
    ,mRotRatioTolerance(rOther.mRotRatioTolerance)
    ,mRotAbsTolerance(rOther.mRotAbsTolerance)
    ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm)
    ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm)
    ,mLMRatioTolerance(rOther.mLMRatioTolerance)
    ,mLMAbsTolerance(rOther.mLMAbsTolerance)
{
}

/// Destructor.
~DisplacementLagrangeMultiplierMixedContactCriteria() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param ThisParameters The configuration parameters
*/
// Factory method: builds a new criteria instance of this class from the
// given configuration parameters (prototype/registry pattern).
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
    return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
// Evaluates convergence after a solution step.  Displacement (and rotation)
// errors are measured on the RHS residual rb; Lagrange-multiplier error is
// measured on the solution increment rDx relative to the current LM values.
// Returns true when all three partial criteria are satisfied.
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
        // Initialize
        double disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_solution_norm = 0.0, lm_increase_norm = 0.0;
        IndexType disp_dof_num(0),rot_dof_num(0),lm_dof_num(0);

        // First iterator
        const auto it_dof_begin = rDofSet.begin();

        // Auxiliar values
        std::size_t dof_id = 0;
        double residual_dof_value = 0.0, dof_value = 0.0, dof_incr = 0.0;

        // The number of active dofs
        const std::size_t number_active_dofs = rb.size();

        // Auxiliar displacement DoF check: without rotation DoFs every
        // non-LM dof counts as "displacement"; with rotation DoFs only the
        // DISPLACEMENT_* components do, the rest fall through to rotation.
        const std::function<bool(const VariableData&)> check_without_rot =
        [](const VariableData& rCurrVar) -> bool {return true;};
        const std::function<bool(const VariableData&)> check_with_rot =
        [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
        const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;

        // Loop over Dofs, accumulating squared norms per dof family.
        #pragma omp parallel for firstprivate(dof_id, residual_dof_value, dof_value, dof_incr) reduction(+:disp_residual_solution_norm, rot_residual_solution_norm, lm_solution_norm, lm_increase_norm, disp_dof_num, rot_dof_num, lm_dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = it_dof_begin + i;

            dof_id = it_dof->EquationId();

            // Check dof id is solved
            if (dof_id < number_active_dofs) {
                if (mActiveDofs[dof_id] == 1) {
                    const auto& r_curr_var = it_dof->GetVariable();
                    if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        dof_value = it_dof->GetSolutionStepValue(0);
                        dof_incr = rDx[dof_id];

                        lm_solution_norm += std::pow(dof_value, 2);
                        lm_increase_norm += std::pow(dof_incr, 2);
                        ++lm_dof_num;
                    } else if ((*p_check_disp)(r_curr_var)) {
                        residual_dof_value = rb[dof_id];
                        disp_residual_solution_norm += std::pow(residual_dof_value, 2);
                        ++disp_dof_num;
                    } else { // We will assume is rotation dof
                        KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                        residual_dof_value = rb[dof_id];
                        rot_residual_solution_norm += std::pow(residual_dof_value, 2);
                        ++rot_dof_num;
                    }
                }
            }
        }

        // Guard against a zero denominator in the LM ratio below.
        if(lm_increase_norm < Tolerance) lm_increase_norm = 1.0;

        KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

        mDispCurrentResidualNorm = disp_residual_solution_norm;
        mRotCurrentResidualNorm = rot_residual_solution_norm;

        // NOTE(review): if lm_dof_num is 0 this divides by zero (inf/NaN);
        // presumably contact problems always carry LM dofs -- confirm.
        const double lm_ratio = lm_solution_norm > Tolerance ? std::sqrt(lm_increase_norm/lm_solution_norm) : 0.0;
        const double lm_abs = std::sqrt(lm_increase_norm)/static_cast<double>(lm_dof_num);

        double residual_disp_ratio, residual_rot_ratio;

        // We initialize the solution: on the first call of the step, record
        // the initial residual norms (floored to 1.0 to avoid dividing by 0).
        if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
            mDispInitialResidualNorm = (disp_residual_solution_norm < Tolerance) ? 1.0 : disp_residual_solution_norm;
            residual_disp_ratio = 1.0;
            if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                mRotInitialResidualNorm = (rot_residual_solution_norm < Tolerance) ? 1.0 : rot_residual_solution_norm;
                residual_rot_ratio = 1.0;
            }
            mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
        }

        // We calculate the ratio of the displacements
        residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

        // We calculate the ratio of the rotations
        // NOTE(review): computed unconditionally -- when rotation dofs are
        // not considered mRotInitialResidualNorm may be 0, yielding inf/NaN;
        // the value is then only consumed for printing/convergence when the
        // rotation flag is set, but verify it cannot leak into output.
        residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm;

        // We calculate the absolute norms
        // NOTE(review): disp_dof_num/rot_dof_num may be 0 -> division by
        // zero on the same caveat as above.
        double residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
        double residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num;

        // The process info of the model part
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

        // We print the results // TODO: Replace for the new log
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (r_process_info.Has(TABLE_UTILITY)) {
                std::cout.precision(4);
                TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                auto& r_table = p_table->GetTable();
                if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                } else {
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << lm_ratio << mLMRatioTolerance << lm_abs << mLMAbsTolerance;
                }
            } else {
                std::cout.precision(4);
                if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT)) {
                    KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                    }
                    KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                } else {
                    KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                    }
                    KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tLAGRANGE MUL: RATIO = " << lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                }
            }
        }

        // Publish the worst ratio / the LM absolute norm to the process info.
        r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > lm_ratio) ? residual_disp_ratio : lm_ratio;
        r_process_info[RESIDUAL_NORM] = (lm_abs > mLMAbsTolerance) ? lm_abs : mLMAbsTolerance;

        // We check if converged: each family passes on ratio OR absolute
        // tolerance; LM also passes when contact vanished (unless
        // ENSURE_CONTACT, which already errored above).
        const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
        const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true;
        const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT) && lm_solution_norm < Tolerance) ? true : (lm_ratio <= mLMRatioTolerance || lm_abs <= mLMAbsTolerance);

        if ( disp_converged && rot_converged && lm_converged ) {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FGRN("       Achieved"));
                    else
                        r_table << "Achieved";
                } else {
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is achieved" << std::endl;
                }
            }
            return true;
        } else {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FRED("   Not achieved"));
                    else
                        r_table << "Not achieved";
                } else {
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementLagrangeMultiplierMixedContactCriteria") << "\tConvergence is not achieved" << std::endl;
                }
            }
            return false;
        }
    } else // In this case all the displacements are imposed!
        return true;
}
/**
 * @brief Initializes the convergence criteria and, when a table utility is
 * registered in the ProcessInfo, builds the output table header exactly once.
 * @param rModelPart Reference to the ModelPart containing the contact problem
 */
void Initialize( ModelPart& rModelPart) override
{
    // Mark the criteria as initialized
    BaseType::mConvergenceCriteriaIsInitialized = true;

    // Detect whether the model part carries rotation DoFs
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));

    // Build the table header, but only the first time we get here
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();

        // Helper appending the recurring ratio/absolute column quartet
        const auto add_column_block = [&r_table](const char* pRatioLabel) {
            r_table.AddColumn(pRatioLabel, 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
        };

        add_column_block("DP RATIO");
        if (mOptions.Is(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
            add_column_block("RT RATIO");
        }
        add_column_block("LM RATIO");
        r_table.AddColumn("CONVERGENCE", 15);

        mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
 * @brief This function initializes the solution step
 * @details Resets the INITIAL_RESIDUAL_IS_SET flag so the reference residual
 * norm is recomputed for the new step, and rebuilds the list of active DoFs
 * (needed when multi-point constraints are present).
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables) (unused here)
 * @param rb RHS vector (residual) (unused here)
 */
void InitializeSolutionStep(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // Initialize flag: the reference residual for this step is not set yet
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

    // Filling mActiveDofs when MPC exist
    ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @details The returned settings are merged with the base-class defaults, so
 * derived keys missing here are still filled in.
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
    // NOTE: these keys must match the ones read in AssignSettings()
    Parameters default_parameters = Parameters(R"(
    {
        "name" : "displacement_lagrange_multiplier_mixed_contact_criteria",
        "ensure_contact" : false,
        "print_convergence_criterion" : false,
        "residual_relative_tolerance" : 1.0e-4,
        "residual_absolute_tolerance" : 1.0e-9,
        "rotation_residual_relative_tolerance" : 1.0e-4,
        "rotation_residual_absolute_tolerance" : 1.0e-9,
        "contact_displacement_relative_tolerance" : 1.0e-4,
        "contact_displacement_absolute_tolerance" : 1.0e-9
    })");

    // Getting base class default parameters
    const Parameters base_default_parameters = BaseType::GetDefaultParameters();
    default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
    return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @details Must match the "name" entry of GetDefaultParameters(), as it is
 * used by the registry/factory to construct this criteria from settings.
 * @return The name of the class
 */
static std::string Name()
{
    return "displacement_lagrange_multiplier_mixed_contact_criteria";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    return "DisplacementLagrangeMultiplierMixedContactCriteria";
}
/// Print information about this object (delegates to Info()).
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data (no internal state is exposed; prints the class name).
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
/**
 * @brief This method assigns settings to member variables
 * @details Reads the tolerances and behavioural flags from the (already
 * validated) parameters. The state flags (table initialized, rotation DoF,
 * initial residual) are reset to false and re-evaluated in Initialize().
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
    BaseType::AssignSettings(ThisParameters);

    // The displacement solution tolerances
    mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
    mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();

    // The rotation solution tolerances
    mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble();
    mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble();

    // The contact (Lagrange multiplier) solution tolerances
    mLMRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
    mLMAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();

    // Set local flags (state flags start unset; Initialize() recomputes them)
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    mOptions.Set(DisplacementLagrangeMultiplierMixedContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
double mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
double mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
double mDispInitialResidualNorm; /// The reference norm of the displacement residual
double mDispCurrentResidualNorm; /// The current norm of the displacement residual
double mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual
double mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual
double mRotInitialResidualNorm; /// The reference norm of the rotation residual
double mRotCurrentResidualNorm; /// The current norm of the rotation residual
double mLMRatioTolerance; /// The ratio threshold for the norm of the LM
double mLMAbsTolerance; /// The absolute value threshold for the norm of the LM
std::vector<int> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierMixedContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the class-local flags; each flag occupies a
// distinct bit position (0..4) in the Kratos::Flags bitset.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_CONTACT_CRITERIA_H */
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#include "libperf_int.h"
#include <ucs/debug/log.h>
#include <string.h>
#include <malloc.h>
#include <unistd.h>
/* Per-endpoint information exchanged between peers during test setup.
 * It is sent as the fixed-size head of a vector whose tail carries the
 * variable-length address/rkey blobs described by the lengths below. */
typedef struct {
    union {
        /* UCT transport: sizes of the blobs following this header */
        struct {
            size_t           dev_addr_len;
            size_t           iface_addr_len;
            size_t           ep_addr_len;
        } uct;
        /* UCP transport: size of the packed worker address */
        struct {
            size_t           addr_len;
        } ucp;
    };
    size_t                   rkey_size;     /* packed rkey size; 0 when none */
    unsigned long            recv_buffer;   /* peer's receive buffer address */
} ucx_perf_ep_info_t;
/*
 * This Quickselect routine is based on the algorithm described in
 * "Numerical recipes in C", Second Edition,
 * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
 * This code by Nicolas Devillard - 1998. Public domain.
 */
/* Returns the median of arr[0..n-1]. NOTE: partially reorders arr in place. */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int low, high ;
    int median;
    int middle, ll, hh;

#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }

    low = 0 ; high = n-1 ; median = (low + high) / 2;
    for (;;) {
        if (high <= low) /* One element only */
            return arr[median] ;

        if (high == low + 1) { /* Two elements only */
            if (arr[low] > arr[high])
                ELEM_SWAP(arr[low], arr[high]) ;
            return arr[median] ;
        }

        /* Find median of low, middle and high items; swap into position low */
        middle = (low + high) / 2;
        if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
        if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
        if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;

        /* Swap low item (now in position middle) into position (low+1) */
        ELEM_SWAP(arr[middle], arr[low+1]) ;

        /* Nibble from each end towards middle, swapping items when stuck */
        ll = low + 1;
        hh = high;
        for (;;) {
            do ll++; while (arr[low] > arr[ll]) ;
            do hh--; while (arr[hh] > arr[low]) ;

            if (hh < ll)
                break;

            ELEM_SWAP(arr[ll], arr[hh]) ;
        }

        /* Swap middle item (in position low) back into correct position */
        ELEM_SWAP(arr[low], arr[hh]) ;

        /* Re-set active partition: keep only the side containing the median */
        if (hh <= median)
            low = ll;
        if (hh >= median)
            high = hh - 1;
    }
}
/*
 * Allocate the send/receive buffers and the IOV descriptor array for a UCT
 * performance test. On any failure, everything allocated so far is released
 * and the failing status is returned.
 */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf,
                                            ucx_perf_params_t *params)
{
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* With a ZCOPY stride the buffer must span all IOV entries */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */

    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.send_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate send buffer: %s", ucs_status_string(status));
        goto err;
    }
    ucs_assert(perf->uct.send_mem.md == perf->uct.md);
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.recv_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status));
        goto err_free_send;
    }
    ucs_assert(perf->uct.recv_mem.md == perf->uct.md);
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        /* BUGFIX: previously jumped to err_free_send, leaking recv_mem */
        goto err_free_recv;
    }

    perf->offset = 0;

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    uct_iface_mem_free(&perf->uct.recv_mem);
err_free_send:
    uct_iface_mem_free(&perf->uct.send_mem);
err:
    return status;
}
/* Release all memory acquired by uct_perf_test_alloc_mem(). */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    uct_iface_mem_free(&perf->uct.send_mem);
    uct_iface_mem_free(&perf->uct.recv_mem);
    free(perf->uct.iov);
}
/* (Re)start the test timers: the start time and both "previous report"
 * timestamps are aligned to the current time. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    perf->start_time        = ucs_get_time();
    perf->prev_time         = perf->start_time;
    perf->prev.time         = perf->start_time;
}
/*
 * Reset all measurement state for a fresh run: copy the parameters, restart
 * the clock, derive the time/iteration bounds, and zero all counters and the
 * latency timing queue.
 */
static void ucx_perf_test_reset(ucx_perf_context_t *perf,
                                ucx_perf_params_t *params)
{
    unsigned idx;

    perf->params     = *params;
    perf->start_time = ucs_get_time();
    perf->prev_time  = perf->start_time;

    /* A zero limit means "unbounded" for both time and iterations */
    if (perf->params.max_time == 0.0) {
        perf->end_time = UINT64_MAX;
    } else {
        perf->end_time = ucs_time_from_sec(perf->params.max_time) + perf->start_time;
    }
    if (perf->params.max_iter == 0) {
        perf->max_iter = UINT64_MAX;
    } else {
        perf->max_iter = perf->params.max_iter;
    }
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    /* Zero the running counters */
    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;

    /* The "previous snapshot" starts at the beginning of the run */
    perf->prev.time  = perf->start_time;
    perf->prev.msgs  = 0;
    perf->prev.bytes = 0;
    perf->prev.iters = 0;

    perf->timing_queue_head = 0;
    perf->offset            = 0;
    for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
        perf->timing_queue[idx] = 0;
    }
}
/* Compute latency, bandwidth and message-rate figures from the current and
 * previous counter snapshots. For ping-pong tests each iteration covers two
 * transfers, so results are halved via 'factor'.
 * NOTE(review): the moment-average divisions assume at least one iteration
 * elapsed since the previous snapshot — confirm callers guarantee this. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    double factor;
    double sec_value;

    sec_value = ucs_time_from_sec(1.0);
    if (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) {
        factor = 2.0;   /* one iteration = send + receive */
    } else {
        factor = 1.0;
    }

    result->iters = perf->current.iters;
    result->bytes = perf->current.bytes;
    result->elapsed_time = perf->current.time - perf->start_time;

    /* Latency: typical (median of recent samples), moment and total averages */
    result->latency.typical =
        __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE)
        / sec_value
        / factor;

    result->latency.moment_average =
        (double)(perf->current.time - perf->prev.time)
        / (perf->current.iters - perf->prev.iters)
        / sec_value
        / factor;

    result->latency.total_average =
        (double)(perf->current.time - perf->start_time)
        / perf->current.iters
        / sec_value
        / factor;

    /* Bandwidth */
    result->bandwidth.typical = 0.0; // Undefined
    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes) * sec_value
        / (double)(perf->current.time - perf->prev.time) * factor;

    result->bandwidth.total_average =
        perf->current.bytes * sec_value
        / (double)(perf->current.time - perf->start_time) * factor;

    /* Packet rate */
    result->msgrate.typical = 0.0; // Undefined
    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs) * sec_value
        / (double)(perf->current.time - perf->prev.time) * factor;

    result->msgrate.total_average =
        perf->current.msgs * sec_value
        / (double)(perf->current.time - perf->start_time) * factor;
}
/*
 * Validate transport-independent test parameters: non-empty message, a
 * positive outstanding window, and each message fitting into the IOV stride.
 * Errors are logged only in verbose mode.
 */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    const int verbose = params->flags & UCX_PERF_TEST_FLAG_VERBOSE;
    size_t idx;

    if (ucx_perf_get_message_size(params) < 1) {
        if (verbose) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (verbose) {
            ucs_error("max_outstanding, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (idx = 0; idx < params->msg_size_cnt; ++idx) {
            if (params->msg_size_list[idx] <= params->iov_stride) {
                continue;
            }
            if (verbose) {
                ucs_error("Buffer size %lu bigger than stride %lu",
                          params->msg_size_list[idx], params->iov_stride);
            }
            return UCS_ERR_INVALID_PARAM;
        }
    }

    return UCS_OK;
}
/* Blocking interface flush: keep progressing the worker until the flush
 * completes (uct_iface_flush stops returning UCS_INPROGRESS). */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    do {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
    } while (status == UCS_INPROGRESS);
}
/* Select the capability flag matching the requested data layout;
 * returns 0 for an unknown layout. */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Select the atomic capability flag matching the operand size;
 * only 4- and 8-byte atomics exist, anything else yields 0. */
static inline uint64_t __get_atomic_flag(size_t size, uint64_t flag32, uint64_t flag64)
{
    if (size == 4) {
        return flag32;
    }
    if (size == 8) {
        return flag64;
    }
    return 0;
}
/* Select the size limit matching the requested data layout; returns 0 for an
 * unknown layout.
 * FIX: zcopy_m was declared uint64_t while its siblings are size_t — callers
 * pass size_t capability fields, so use size_t consistently (avoids an
 * implicit narrowing on 32-bit builds). */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/*
 * Verify that the selected UCT interface supports the requested test: the
 * required capability flags for the command/layout, the message size bounds,
 * AM header constraints, the flow-control window and the IOV entry count.
 * FIX: corrected the user-facing warning typo "on-sided" -> "one-sided".
 */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface)
{
    uct_iface_attr_t attr;
    ucs_status_t status;
    uint64_t required_flags;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        required_flags = __get_flag(params->uct.data_layout, 0,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, 0,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_ADD32,
                                           UCT_IFACE_FLAG_ATOMIC_ADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_FADD32,
                                           UCT_IFACE_FLAG_ATOMIC_FADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_SWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_SWAP64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_CSWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_CSWAP64);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* An all-zero required_flags means the layout/size combination is invalid */
    if (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Device does not support required operation");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too big");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size too big");
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size larger than message size");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window too large (should be <= %d)",
                          UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            ucs_warn("Running active-message test with one-sided progress");
        }
    }

    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }

    return UCS_OK;
}
/*
 * Exchange addresses/rkeys with all peers and connect an endpoint to each of
 * them. The exchanged record is an ucx_perf_ep_info_t header followed by the
 * packed rkey and the device/iface/ep address blobs.
 * FIX: a failed peers-array allocation previously jumped to the error path
 * with 'status' still UCS_OK, making the function report success; it now
 * returns UCS_ERR_NO_MEMORY.
 */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* An rkey is exchanged only when the MD can register/allocate memory */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Lay out rkey + addresses back-to-back in the scratch buffer */
    rkey_buffer = buffer;
    dev_addr    = (void*)rkey_buffer + info.rkey_size;
    iface_addr  = (void*)dev_addr    + info.uct.dev_addr_len;
    ep_addr     = (void*)iface_addr  + info.uct.iface_addr_len;
    ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size);

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* BUGFIX: set a failure status (was left UCS_OK from previous call) */
        ucs_error("Failed to allocate peers array");
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }

            status = uct_ep_create(perf->uct.iface, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    }

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Decode the peer's record using its own advertised lengths */
        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = (void*)rkey_buffer + remote_info->rkey_size;
        iface_addr  = (void*)dev_addr    + remote_info->uct.dev_addr_len;
        ep_addr     = (void*)iface_addr  + remote_info->uct.iface_addr_len;
        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.type   = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            status = uct_ep_create_connected(perf->uct.iface, dev_addr, iface_addr,
                                             &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }
    uct_perf_iface_flush_b(perf);

    free(buffer);

    rte_call(perf, barrier);
    return UCS_OK;

err_destroy_eps:
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.type != NULL) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/* Tear down all peer endpoints and rkeys created by
 * uct_perf_test_setup_endpoints(); the leading barrier makes sure no peer is
 * still sending when the AM handler is removed. */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size, group_index, i;

    rte_call(perf, barrier);

    /* Detach the AM handler before destroying endpoints */
    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, UCT_CB_FLAG_SYNC);

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    for (i = 0; i < group_size; ++i) {
        if (i != group_index) {
            if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
                uct_rkey_release(&perf->uct.peers[i].rkey);
            }
            if (perf->uct.peers[i].ep) {
                uct_ep_destroy(perf->uct.peers[i].ep);
            }
        }
    }
    free(perf->uct.peers);
}
/*
 * Derive the UCP context features (RMA/AMO/TAG/STREAM) required by the test
 * command and validate the generic parameters.
 * FIX: message_size was declared as ucs_status_t instead of size_t — a
 * wrong-type declaration that could truncate large message sizes and broke
 * the 32/64-bit atomic size comparison on values wider than the enum type.
 */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                               ucp_params_t *ucp_params)
{
    ucs_status_t status;
    size_t message_size;

    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        ucp_params->features |= UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        if (message_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
    case UCX_PERF_CMD_TAG_SYNC:
        ucp_params->features    |= UCP_FEATURE_TAG;
        ucp_params->field_mask  |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size = sizeof(ucp_perf_request_t);
        break;
    case UCX_PERF_CMD_STREAM:
        ucp_params->features    |= UCP_FEATURE_STREAM;
        ucp_params->field_mask  |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size = sizeof(ucp_perf_request_t);
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/*
 * Allocate the IOV descriptor array (iovcnt entries per thread) when the
 * datatype is IOV; contiguous datatypes need no allocation. On success the
 * array is stored in *iov_p; the caller owns and frees it.
 */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    /* Nothing to do for non-IOV datatypes */
    if (datatype != UCP_PERF_DATATYPE_IOV) {
        return UCS_OK;
    }

    iov = malloc(sizeof(*iov) * iovcnt * thread_count);
    if (iov == NULL) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }
    *iov_p = iov;
    return UCS_OK;
}
/*
 * Map a host-memory buffer through UCP (ucp_mem_map with ALLOCATE, optionally
 * NONBLOCK) and return the actual mapped address via *addr and the handle via
 * *memh.
 */
static ucs_status_t
ucp_perf_test_alloc_host(ucx_perf_context_t *perf, ucx_perf_params_t *params,
                         void **addr, size_t length, ucp_mem_h *memh,
                         int check_non_blk_flag)
{
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    ucs_status_t status;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = *addr;
    mem_map_params.length     = length;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;
    /* Non-blocking mapping is only honored where the caller allows it */
    if (check_non_blk_flag) {
        mem_map_params.flags |= (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
                                UCP_MEM_MAP_NONBLOCK : 0;
    }

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
    if (status != UCS_OK) {
        goto err;
    }

    /* Query the handle to learn the address UCP actually chose */
    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(*memh, &mem_attr);
    if (status != UCS_OK) {
        goto err;
    }

    *addr = mem_attr.address;
    return UCS_OK;

err:
    return status;
}
static ucs_status_t
ucp_perf_test_alloc_cuda(void **addr, size_t length)
{
#if HAVE_CUDA
cudaError_t cerr;
cerr = cudaMalloc(addr, length);
if (cerr != cudaSuccess) {
return UCS_ERR_NO_MEMORY;
}
#endif
return UCS_OK;
}
/*
 * Dispatch a contiguous-buffer allocation to the handler matching the
 * configured memory type (host or CUDA); any other type is unsupported.
 */
static ucs_status_t
ucp_perf_test_alloc_contig(ucx_perf_context_t *perf, ucx_perf_params_t *params,
                           void **addr, size_t length, ucp_mem_h *memh,
                           int check_non_blk_flag)
{
    switch (perf->params.mem_type) {
    case UCT_MD_MEM_TYPE_HOST:
        return ucp_perf_test_alloc_host(perf, params, addr, length, memh,
                                        check_non_blk_flag);
    case UCT_MD_MEM_TYPE_CUDA:
        return ucp_perf_test_alloc_cuda(addr, length);
    default:
        return UCS_ERR_UNSUPPORTED;
    }
}
/* Free a buffer allocated by ucp_perf_test_alloc_contig(), using the release
 * path matching the configured memory type. */
static void ucp_perf_test_free_contig(ucx_perf_context_t *perf, void *addr, ucp_mem_h memh)
{
    if (perf->params.mem_type == UCT_MD_MEM_TYPE_HOST) {
        ucp_mem_unmap(perf->ucp.context, memh);
    } else if (perf->params.mem_type == UCT_MD_MEM_TYPE_CUDA) {
#if HAVE_CUDA
        cudaFree(addr);
#endif
    }
}
/*
 * Allocate send/receive buffers and (optionally) IOV descriptor arrays for a
 * UCP test; on failure everything allocated so far is released.
 * FIX: the error exit hard-coded UCS_ERR_NO_MEMORY, masking the real failure
 * status from ucp_perf_test_alloc_contig (e.g. UCS_ERR_UNSUPPORTED); it now
 * propagates 'status'.
 */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucs_status_t status;
    size_t buffer_size;

    /* With an IOV stride the buffer must span all entries */
    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;
    status = ucp_perf_test_alloc_contig(perf, params, &perf->send_buffer,
                                        buffer_size * params->thread_count,
                                        &perf->ucp.send_memh, 1);
    if (status != UCS_OK) {
        goto err;
    }

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;
    status = ucp_perf_test_alloc_contig(perf, params, &perf->recv_buffer,
                                        buffer_size * params->thread_count,
                                        &perf->ucp.recv_memh, 0);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->ucp.send_iov        = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    ucp_perf_test_free_contig(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
    ucp_perf_test_free_contig(perf, perf->send_buffer, perf->ucp.send_memh);
err:
    /* BUGFIX: propagate the actual failure (was hard-coded UCS_ERR_NO_MEMORY) */
    return status;
}
/* Release all memory acquired by ucp_perf_test_alloc_mem(), in reverse
 * allocation order. */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    ucp_perf_test_free_contig(perf, perf->recv_buffer, perf->ucp.recv_memh);
    ucp_perf_test_free_contig(perf, perf->send_buffer, perf->ucp.send_memh);
}
/*
 * Destroy all peer rkeys and endpoints; disconnects are issued first and then
 * completed by progressing the worker, so they overlap across peers.
 * FIX: the calloc result was dereferenced without a NULL check (crash on OOM)
 * and its arguments were in (size, count) order; if the tracking array cannot
 * be allocated, each disconnect request is now completed inline instead.
 */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
                                      unsigned group_size)
{
    ucs_status_ptr_t *reqs;
    ucp_tag_recv_info_t info;
    ucs_status_t status;
    void *req;
    unsigned i;

    reqs = calloc(group_size, sizeof(*reqs));

    for (i = 0; i < group_size; ++i) {
        if (perf->ucp.peers[i].rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.peers[i].rkey);
        }
        if (perf->ucp.peers[i].ep == NULL) {
            continue;
        }

        req = ucp_disconnect_nb(perf->ucp.peers[i].ep);
        if (reqs != NULL) {
            reqs[i] = req;
        } else if (UCS_PTR_IS_PTR(req)) {
            /* No tracking array available: complete the disconnect inline */
            do {
                ucp_worker_progress(perf->ucp.worker);
                status = ucp_request_test(req, &info);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(req);
        }
    }

    if (reqs != NULL) {
        for (i = 0; i < group_size; ++i) {
            if (!UCS_PTR_IS_PTR(reqs[i])) {
                continue;
            }
            do {
                ucp_worker_progress(perf->ucp.worker);
                status = ucp_request_test(reqs[i], &info);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(reqs[i]);
        }
        free(reqs);
    }

    free(perf->ucp.peers);
}
/* All-reduce of a local status across the RTE group: post this rank's status
 * to every peer and receive each peer's status in turn.  Returns UCS_OK only
 * when all ranks reported success; otherwise the last non-OK status seen. */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned group_size = rte_call(perf, group_size);
    ucs_status_t collective_status = UCS_OK;
    struct iovec vec;
    void *req = NULL;
    unsigned i;

    vec.iov_base = &status;
    vec.iov_len = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);
    for (i = 0; i < group_size; ++i) {
        /* 'status' is overwritten by each receive (including our own slot) */
        rte_call(perf, recv, i, &status, sizeof(status), req);
        if (status != UCS_OK) {
            collective_status = status;
        }
    }
    return collective_status;
}
/* Exchange worker addresses (and packed rkeys, when RMA/AMO features are
 * enabled) across the RTE group, then create an endpoint - and unpack the
 * peer's rkey - for every remote rank.  Posted wire format per rank:
 * [ucx_perf_ep_info_t][worker address][packed rkey (optional)].
 * On success all ranks' statuses are cross-checked before returning.
 *
 * Fixes vs. original:
 *  - the peers array was sized with sizeof(*perf->uct.peers) (wrong union
 *    member for a UCP test); use the ucp peers element size.
 *  - on calloc failure 'status' was left stale (possibly UCS_OK); set
 *    UCS_ERR_NO_MEMORY explicitly before the error path. */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    ucp_address_t *address;
    size_t address_length = 0;
    ucp_ep_params_t ep_params;
    ucs_status_t status;
    struct iovec vec[3];
    void *rkey_buffer;
    void *req = NULL;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
        }
        goto err;
    }

    info.ucp.addr_len = address_length;
    info.recv_buffer  = (uintptr_t)perf->recv_buffer;

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = address;
    vec[1].iov_len  = address_length;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        /* Pack the recv buffer rkey so peers can target it with RMA/AMO */
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &info.rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            ucp_worker_release_address(perf->ucp.worker, address);
            goto err;
        }
        vec[2].iov_base = rkey_buffer;
        vec[2].iov_len  = info.rkey_size;
        rte_call(perf, post_vec, vec, 3, &req);
        ucp_rkey_buffer_release(rkey_buffer);
    } else {
        info.rkey_size = 0;
        rte_call(perf, post_vec, vec, 2, &req);
    }
    ucp_worker_release_address(perf->ucp.worker, address);
    rte_call(perf, exchange_vec, req);

    perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
    if (perf->ucp.peers == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err_destroy_eps;
    }

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;   /* no endpoint to ourselves */
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Parse [info][address][rkey] out of the received blob */
        remote_info = buffer;
        address     = (void*)(remote_info + 1);
        rkey_buffer = (void*)address + remote_info->ucp.addr_len;
        perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
                                        &perf->ucp.peers[i].rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_buffer;
            }
        } else {
            perf->ucp.peers[i].rkey = NULL;
        }
    }

    free(buffer);

    /* Only succeed if every rank in the group succeeded */
    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        ucp_perf_test_destroy_eps(perf, group_size);
    }
    return status;

err_free_buffer:
    free(buffer);
err_destroy_eps:
    ucp_perf_test_destroy_eps(perf, group_size);
err:
    /* Tell the other ranks we failed so they abort as well */
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Tear down all endpoints after a test run.  The barrier ensures no peer is
 * still sending to us when we start disconnecting. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size;

    rte_call(perf, barrier);
    group_size = rte_call(perf, group_size);
    ucp_perf_test_destroy_eps(perf, group_size);
}
/* Configure the context for a warmup run: cap iterations at warmup_iter (but
 * never more than 10% of the real run) and disable intermediate reporting by
 * pushing the report interval out of reach. */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
    perf->max_iter        = ucs_min(params->warmup_iter, params->max_iter / 10);
    perf->report_interval = -1;
}
/* Find and open the memory domain that provides the transport/device pair
 * requested in perf->params.uct.  Iterates over all MD resources, opens each
 * one, and keeps the first MD whose transport list contains a match (stored
 * in perf->uct.md, left open for the caller).  Non-matching MDs are closed
 * again.  Returns UCS_ERR_NO_DEVICE when no MD matches. */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned i, num_md_resources;
    unsigned j, num_tl_resources;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        goto out;
    }

    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL, &md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_open(md_resources[i].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            goto out_release_md_resources;
        }

        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(perf->params.uct.tl_name, tl_resources[j].tl_name) &&
                !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name))
            {
                /* Found the requested tl/dev pair - keep this MD open */
                uct_release_tl_resource_list(tl_resources);
                perf->uct.md = md;
                status = UCS_OK;
                goto out_release_md_resources;
            }
        }

        /* This MD does not provide the requested transport - close it */
        uct_md_close(md);
        uct_release_tl_resource_list(tl_resources);
    }

    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;

out_release_md_resources:
    uct_release_md_resource_list(md_resources);
out:
    return status;
}
/* Bring up the full UCT test stack: async context -> worker -> memory domain
 * -> interface -> buffers -> endpoints, unwinding via the goto ladder on any
 * failure.  On success, progress is enabled for both send and receive. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* Make sure the transport supports what the selected test requires */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Tear down the UCT test stack in exact reverse order of uct_perf_setup(). */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/* Bring up the UCP test stack: context -> worker -> buffers -> endpoints,
 * unwinding via the goto ladder on any failure.  The feature mask for
 * ucp_init() is derived from the test parameters by
 * ucp_perf_test_fill_params().
 *
 * Fix vs. original: corrected the typo in the warning message
 * ("alocate" -> "allocate"). */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf,
                                   ucx_perf_params_t *params)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;

    ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ucp_params.features   = 0;

    status = ucp_perf_test_fill_params(params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = params->thread_mode;

    status = ucp_worker_create(perf->ucp.context, &worker_params,
                               &perf->ucp.worker);
    if (status != UCS_OK) {
        goto err_cleanup;
    }

    status = ucp_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_destroy_worker;
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_mem;
    }

    return UCS_OK;

err_free_mem:
    ucp_perf_test_free_mem(perf);
err_destroy_worker:
    ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Tear down the UCP test stack in reverse order of ucp_perf_setup().  The
 * barrier after endpoint cleanup keeps ranks synchronized before freeing the
 * buffers peers may still reference. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    rte_call(perf, barrier);
    ucp_perf_test_free_mem(perf);
    ucp_worker_destroy(perf->ucp.worker);
    ucp_cleanup(perf->ucp.context);
}
/* Per-API dispatch table (indexed by ucx_perf_api_t): setup/cleanup of the
 * transport stack and the test-run entry point for UCT and UCP. */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf, ucx_perf_params_t *params);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch}
};
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/* Top-level benchmark driver.  Validates the parameters, sets up the selected
 * API (UCT or UCP) via the dispatch table, optionally runs a warmup pass,
 * then runs the measured test and reports the result.  In multi-threaded
 * mode the whole run is delegated to ucx_perf_thread_spawn(). */
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_reset(perf, params);

    status = ucx_perf_funcs[params->api].setup(perf, params);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
        if (params->warmup_iter > 0) {
            /* Warmup pass: shortened iteration count, reporting disabled;
             * reset the context afterwards so measurements start clean */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            rte_call(perf, barrier);
            ucx_perf_test_reset(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
#include <omp.h>
/* Per-thread state for multi-threaded runs: thread id/count, a pointer to the
 * shared per-thread status array, plus this thread's own copy of the perf
 * context and its result. */
typedef struct {
    pthread_t           pt;        /* NOTE(review): appears unused here - threads are OpenMP-managed */
    int                 tid;       /* this thread's index */
    int                 ntid;      /* total number of threads */
    ucs_status_t*       statuses;  /* shared array, one slot per thread */
    ucx_perf_context_t  perf;      /* private copy with doctored buffers */
    ucx_perf_result_t   result;
} ucx_perf_thread_context_t;
/* Body executed by each OpenMP thread: optional warmup pass, then the
 * measured run.  After each pass every thread checks the whole status array
 * and bails out if any peer thread failed; only the master thread resets the
 * context and reports.  Returns a pointer to this thread's status slot. */
static void* ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx     = (ucx_perf_thread_context_t*) arg;
    ucx_perf_result_t* result           = &tctx->result;
    ucx_perf_context_t* perf            = &tctx->perf;
    ucx_perf_params_t* params           = &perf->params;
    ucs_status_t* statuses              = tctx->statuses;
    int tid                             = tctx->tid;
    int i;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        statuses[tid] = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        /* Abort if any thread failed the warmup */
        for (i = 0; i < tctx->ntid; i++) {
            if (UCS_OK != statuses[i]) {
                goto out;
            }
        }
#pragma omp master
        ucx_perf_test_reset(perf, params);
    }

    /* Run test */
#pragma omp barrier
    statuses[tid] = ucx_perf_funcs[params->api].run(perf);
    rte_call(perf, barrier);
    for (i = 0; i < tctx->ntid; i++) {
        if (UCS_OK != statuses[i]) {
            goto out;
        }
    }
#pragma omp master
    {
        /* Assuming all threads are fairly treated, reporting only tid==0
           TODO: aggregate reports */
        ucx_perf_calc_result(perf, result);
        rte_call(perf, report, result, perf->params.report_arg, 1);
    }

out:
    return &statuses[tid];
}
/* Multi-threaded run: clone the perf context once per OpenMP thread, offset
 * each clone's send/recv buffers by tid * message_size so threads work on
 * disjoint regions, run the test in parallel, and fold the per-thread
 * statuses into a single return value (last failure wins). */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx;
    ucs_status_t* statuses;
    size_t message_size;
    ucs_status_t status;
    int ti, nti;

    message_size = ucx_perf_get_message_size(&perf->params);
    omp_set_num_threads(perf->params.thread_count);
    nti = perf->params.thread_count;

    tctx     = calloc(nti, sizeof(ucx_perf_thread_context_t));
    statuses = calloc(nti, sizeof(ucs_status_t));
    if ((tctx == NULL) || (statuses == NULL)) {
        status = UCS_ERR_NO_MEMORY;
        goto out_free;
    }

#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].tid      = ti;
        tctx[ti].ntid     = nti;
        tctx[ti].statuses = statuses;
        tctx[ti].perf     = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        tctx[ti].perf.send_buffer += ti * message_size;
        tctx[ti].perf.recv_buffer += ti * message_size;
        tctx[ti].perf.offset       = ti * message_size;
        ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    status = UCS_OK;
    for (ti = 0; ti < nti; ti++) {
        if (UCS_OK != statuses[ti]) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(statuses[ti]));
            status = statuses[ti];
        }
    }

out_free:
    free(statuses);
    free(tctx);
    return status;
}
#else
/* Stub used when built without OpenMP: multi-threaded mode is unavailable. */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
DRB050-functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Arrays passed as function parameters
*/
/* Write 0.5 * c[i] into o1[i] for the first len elements.  Each iteration
 * touches only its own index, so the dynamically scheduled parallel loop is
 * free of data races. */
void foo1(double o1[], double c[], int len)
{
    int idx;
#pragma omp parallel for schedule(dynamic)
    for (idx = 0; idx < len; ++idx) {
        o1[idx] = 0.5 * c[idx];
    }
}
/* File-scope arrays shared with foo1 (zero-initialised by the C runtime). */
double o1[100];
double c[100];

/* Driver: halve each element of c into o1 across all 100 elements. */
int main()
{
    foo1 (o1, c, 100);
    return 0;
}
|
DRB068-restrictpointer2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The restrict type qualifier is an indication to the compiler that,
if the memory addressed by the restrict -qualified pointer is modified, no other pointer will access that same memory.
If a particular chunk of memory is not modified, it can be aliased through more than one restricted pointer.
A C99 restrict feature.
For gcc, you must use -std=c99 to compile this program.
*/
#include <stdlib.h>
#include <stdio.h>
/* Populate the three arrays in parallel: a[k] = 1, b[k] = k, c[k] = k*k.
 * The restrict qualifiers promise the compiler the arrays do not alias. */
void init(int n, int * restrict a, int * restrict b, int * restrict c)
{
#pragma omp parallel for
    for (int k = 0; k < n; k++) {
        a[k] = 1;
        b[k] = k;
        c[k] = k * k;
    }
}
/* Element-wise sum: a[k] = b[k] + c[k] for k in [0, n). */
void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
#pragma omp parallel for
    for (int k = 0; k < n; k++) {
        a[k] = b[k] + c[k];
    }
}
/* Print the first n triples "a[i] b[i] c[i]", one per line, to stdout. */
void print(int n, int * restrict a, int * restrict b, int * restrict c)
{
    int i = 0;
    while (i < n) {
        printf("%d %d %d\n", a[i], b[i], c[i]);
        ++i;
    }
}
/* Driver: allocate three n-element int arrays, initialise them, compute the
 * element-wise sum, print, and free.
 *
 * Fix vs. original: when malloc of b (or c) failed, the arrays allocated
 * earlier were leaked; now everything allocated so far is freed before
 * bailing out. */
int main()
{
    int n = 1000;
    int * a , *b, *c;

    a = (int*) malloc (n* sizeof (int));
    if (a ==0)
    {
        fprintf (stderr, "skip the execution due to malloc failures.\n");
        return 1;
    }
    b = (int*) malloc (n* sizeof (int));
    if (b ==0)
    {
        fprintf (stderr, "skip the execution due to malloc failures.\n");
        free (a);
        return 1;
    }
    c = (int*) malloc (n* sizeof (int));
    if (c ==0)
    {
        fprintf (stderr, "skip the execution due to malloc failures.\n");
        free (a);
        free (b);
        return 1;
    }

    init (n, a, b,c);
    foo (n, a, b,c);
    print (n, a, b,c);

    free (a);
    free (b);
    free (c);
    return 0;
}
|
Diffusion_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Diffusion_core.h"
#include "utils.h"
#define EPS 1.0e-5
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
/*sign function*/
/* Sign function: +1 for positive x, -1 for negative x, 0 for zero. */
int signNDFc(float x) {
    if (x > 0) {
        return 1;
    }
    return (x < 0) ? -1 : 0;
}
/* C-OMP implementation of linear and nonlinear diffusion with the regularisation model [1,2] (2D/3D case)
* The minimisation is performed using explicit scheme.
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambda - regularization parameter
* 3. Edge-preserving parameter (sigma), when sigma equals to zero nonlinear diffusion -> linear diffusion
* 4. Number of iterations, for explicit scheme >= 150 is recommended
* 5. tau - time-marching step for explicit scheme
* 6. Penalty type: 1 - Huber, 2 - Perona-Malik, 3 - Tukey Biweight, 4 - Threshold-constrained Linear, , 5 - modified Huber with a dead stop on edge
* 7. eplsilon - tolerance constant
*
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* This function is based on the paper by
* [1] Perona, P. and Malik, J., 1990. Scale-space and edge detection using anisotropic diffusion. IEEE Transactions on pattern analysis and machine intelligence, 12(7), pp.629-639.
* [2] Black, M.J., Sapiro, G., Marimont, D.H. and Heeger, D., 1998. Robust anisotropic diffusion. IEEE Transactions on image processing, 7(3), pp.421-432.
*/
/* Entry point for linear/nonlinear diffusion regularisation (2D when dimZ==1,
 * 3D otherwise), iterated with an explicit scheme.  Output starts as a copy
 * of Input and is updated in place each iteration.  When epsil != 0, every
 * 5th iteration the relative residual ||Out - Out_prev|| / ||Out|| is
 * computed and the loop stops after the tolerance has been met more than 3
 * times.  infovector[0] receives the iteration count, infovector[1] the last
 * residual.  Always returns 0. */
float Diffusion_CPU_main(float *Input, float *Output, float *infovector, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, int penaltytype, float epsil, int dimX, int dimY, int dimZ)
{
    int i;
    float sigmaPar2, *Output_prev=NULL;
    sigmaPar2 = sigmaPar/sqrt(2.0f);   /* edge parameter rescaled for the discrete scheme */
    long j, DimTotal;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    DimTotal = (long)(dimX*dimY*dimZ);

    /* NOTE(review): calloc result is not checked - a failed allocation would
     * be dereferenced in the residual loop below.  TODO: add a NULL check. */
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));

    /* copy into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));

    for(i=0; i < iterationsNumb; i++) {
        /* snapshot every 5th iteration for the convergence test */
        if ((epsil != 0.0f) && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
        if (dimZ == 1) {
            /* running 2D diffusion iterations */
            if (sigmaPar == 0.0f) LinearDiff2D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY)); /* linear diffusion (heat equation) */
            else NonLinearDiff2D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY)); /* nonlinear diffusion */
        }
        else {
            /* running 3D diffusion iterations */
            if (sigmaPar == 0.0f) LinearDiff3D(Input, Output, lambdaPar, tau, (long)(dimX), (long)(dimY), (long)(dimZ));
            else NonLinearDiff3D(Input, Output, lambdaPar, sigmaPar2, tau, penaltytype, (long)(dimX), (long)(dimY), (long)(dimZ));
        }
        /* check early stopping criteria if epsilon not equal zero */
        if ((epsil != 0.0f) && (i % 5 == 0)) {
            re = 0.0f; re1 = 0.0f;
            for(j=0; j<DimTotal; j++)
            {
                re += powf(Output[j] - Output_prev[j],2);
                re1 += powf(Output[j],2);
            }
            re = sqrtf(re)/sqrtf(re1);
            /* stop if the norm residual is less than the tolerance EPS */
            if (re < epsil) count++;
            if (count > 3) break;
        }
    }

    free(Output_prev);   /* free(NULL) is a no-op when epsil == 0 */

    /*adding info into info_vector */
    infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;         /* reached tolerance */
    return 0;
}
/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/
/* linear diffusion (heat equation) */
float LinearDiff2D(float *Input, float *Output, float lambdaPar, float tau, long dimX, long dimY)
{
long i,j,i1,i2,j1,j2,index;
float e,w,n,s,e1,w1,n1,s1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1)
for(j=0; j<dimY; j++) {
/* symmetric boundary conditions (Neuman) */
j1 = j+1; if (j1 == dimY) j1 = j-1;
j2 = j-1; if (j2 < 0) j2 = j+1;
for(i=0; i<dimX; i++) {
/* symmetric boundary conditions (Neuman) */
i1 = i+1; if (i1 == dimX) i1 = i-1;
i2 = i-1; if (i2 < 0) i2 = i+1;
index = j*dimX+i;
e = Output[j*dimX+i1];
w = Output[j*dimX+i2];
n = Output[j1*dimX+i];
s = Output[j2*dimX+i];
e1 = e - Output[index];
w1 = w - Output[index];
n1 = n - Output[index];
s1 = s - Output[index];
Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1) - (Output[index] - Input[index]));
}}
return *Output;
}
/* nonlinear diffusion */
/* One explicit-scheme sweep of 2D nonlinear diffusion.  Like LinearDiff2D,
 * but each neighbour difference is first passed through an edge-preserving
 * penalty selected by 'penaltytype': 1 Huber, 2 Perona-Malik, 3 Tukey
 * biweight, 4 threshold-constrained linear, 5 threshold-constrained Huber.
 * Output is updated in place; returns Output[0]. */
float NonLinearDiff2D(float *Input, float *Output, float lambdaPar, float sigmaPar, float tau, int penaltytype, long dimX, long dimY)
{
    long i,j,i1,i2,j1,j2,index;
    float e,w,n,s,e1,w1,n1,s1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1)
    for(j=0; j<dimY; j++) {
        /* symmetric boundary conditions (Neuman) */
        j1 = j+1; if (j1 == dimY) j1 = j-1;
        j2 = j-1; if (j2 < 0) j2 = j+1;
        for(i=0; i<dimX; i++) {
            /* symmetric boundary conditions (Neuman) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            i2 = i-1; if (i2 < 0) i2 = i+1;
            index = j*dimX+i;

            /* 4-neighbour values and their differences to the centre pixel */
            e = Output[j*dimX+i1];
            w = Output[j*dimX+i2];
            n = Output[j1*dimX+i];
            s = Output[j2*dimX+i];
            e1 = e - Output[index];
            w1 = w - Output[index];
            n1 = n - Output[index];
            s1 = s - Output[index];

            if (penaltytype == 1){
                /* Huber penalty: clip to the sign outside [-sigma, sigma] */
                if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
                else e1 = e1/sigmaPar;
                if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
                else w1 = w1/sigmaPar;
                if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
                else n1 = n1/sigmaPar;
                if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
                else s1 = s1/sigmaPar;
            }
            else if (penaltytype == 2) {
                /* Perona-Malik: rational edge-stopping weight */
                e1 /= (1.0f + powf((e1/sigmaPar),2));
                w1 /= (1.0f + powf((w1/sigmaPar),2));
                n1 /= (1.0f + powf((n1/sigmaPar),2));
                s1 /= (1.0f + powf((s1/sigmaPar),2));
            }
            else if (penaltytype == 3) {
                /* Tukey Biweight: zero beyond sigma, smooth inside */
                if (fabs(e1) <= sigmaPar) e1 = e1*powf((1.0f - powf((e1/sigmaPar),2)), 2);
                else e1 = 0.0f;
                if (fabs(w1) <= sigmaPar) w1 = w1*powf((1.0f - powf((w1/sigmaPar),2)), 2);
                else w1 = 0.0f;
                if (fabs(n1) <= sigmaPar) n1 = n1*powf((1.0f - powf((n1/sigmaPar),2)), 2);
                else n1 = 0.0f;
                if (fabs(s1) <= sigmaPar) s1 = s1*powf((1.0f - powf((s1/sigmaPar),2)), 2);
                else s1 = 0.0f;
            }
            else if (penaltytype == 4) {
                /* Threshold-constrained linear diffusion
                   This means that the linear diffusion will be performed on pixels with
                   absolute difference less than the threshold.
                */
                if (fabs(e1) > sigmaPar) e1 = 0.0f;
                if (fabs(w1) > sigmaPar) w1 = 0.0f;
                if (fabs(n1) > sigmaPar) n1 = 0.0f;
                if (fabs(s1) > sigmaPar) s1 = 0.0f;
            }
            else if (penaltytype == 5) {
                /*
                   Threshold constrained Huber diffusion: Huber inside
                   [-2*sigma, 2*sigma], a hard stop (zero) outside
                */
                if (fabs(e1) <= 2.0f*sigmaPar) {
                    if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
                    else e1 = e1/sigmaPar; }
                else e1 = 0.0f;
                if (fabs(w1) <= 2.0f*sigmaPar) {
                    if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
                    else w1 = w1/sigmaPar; }
                else w1 = 0.0f;
                if (fabs(n1) <= 2.0f*sigmaPar) {
                    if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
                    else n1 = n1/sigmaPar; }
                else n1 = 0.0f;
                if (fabs(s1) <= 2.0f*sigmaPar) {
                    if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
                    else s1 = s1/sigmaPar; }
                else s1 = 0.0f;
            }
            else {
                /* NOTE(review): this 'break' only exits the inner i-loop, so
                 * the message is printed once per row - TODO confirm intent */
                printf("%s \n", "No penalty function selected! Use 1,2,3,4 or 5.");
                break;
            }
            Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1) - (Output[index] - Input[index]));
        }}
    return *Output;
}
/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/* linear diffusion (heat equation) */
/* One explicit-scheme sweep of 3D linear diffusion (heat equation): the 2D
 * scheme extended with up/down neighbours along the k (slice) axis, same
 * symmetric/Neumann boundary reflection.  Output is updated in place;
 * returns Output[0]. */
float LinearDiff3D(float *Input, float *Output, float lambdaPar, float tau, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,i2,j1,j2,k1,k2,index;
    float e,w,n,s,u,d,e1,w1,n1,s1,u1,d1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1,k,k1,k2,u1,d1,u,d)
    for(k=0; k<dimZ; k++) {
        /* reflect out-of-range slice indices */
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        k2 = k-1; if (k2 < 0) k2 = k+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            for(i=0; i<dimX; i++) {
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                i2 = i-1; if (i2 < 0) i2 = i+1;
                index = (dimX*dimY)*k + j*dimX+i;

                /* 6-neighbour values and differences to the centre voxel */
                e = Output[(dimX*dimY)*k + j*dimX+i1];
                w = Output[(dimX*dimY)*k + j*dimX+i2];
                n = Output[(dimX*dimY)*k + j1*dimX+i];
                s = Output[(dimX*dimY)*k + j2*dimX+i];
                u = Output[(dimX*dimY)*k1 + j*dimX+i];
                d = Output[(dimX*dimY)*k2 + j*dimX+i];
                e1 = e - Output[index];
                w1 = w - Output[index];
                n1 = n - Output[index];
                s1 = s - Output[index];
                u1 = u - Output[index];
                d1 = d - Output[index];

                Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1 + u1 + d1) - (Output[index] - Input[index]));
            }}}
    return *Output;
}
/* One explicit-scheme sweep of 3D nonlinear diffusion: the 3D analogue of
 * NonLinearDiff2D, applying the selected penalty (1 Huber, 2 Perona-Malik,
 * 3 Tukey biweight, 4 threshold-constrained linear, 5 threshold-constrained
 * Huber) to all six neighbour differences.  Output is updated in place;
 * returns Output[0]. */
float NonLinearDiff3D(float *Input, float *Output, float lambdaPar, float sigmaPar, float tau, int penaltytype, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,i2,j1,j2,k1,k2,index;
    float e,w,n,s,u,d,e1,w1,n1,s1,u1,d1;
#pragma omp parallel for shared(Input) private(index,i,j,i1,i2,j1,j2,e,w,n,s,e1,w1,n1,s1,k,k1,k2,u1,d1,u,d)
    for(k=0; k<dimZ; k++) {
        /* reflect out-of-range slice indices */
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        k2 = k-1; if (k2 < 0) k2 = k+1;
        for(j=0; j<dimY; j++) {
            /* symmetric boundary conditions (Neuman) */
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            for(i=0; i<dimX; i++) {
                /* symmetric boundary conditions (Neuman) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                i2 = i-1; if (i2 < 0) i2 = i+1;
                index = (dimX*dimY)*k + j*dimX+i;

                /* 6-neighbour values and differences to the centre voxel */
                e = Output[(dimX*dimY)*k + j*dimX+i1];
                w = Output[(dimX*dimY)*k + j*dimX+i2];
                n = Output[(dimX*dimY)*k + j1*dimX+i];
                s = Output[(dimX*dimY)*k + j2*dimX+i];
                u = Output[(dimX*dimY)*k1 + j*dimX+i];
                d = Output[(dimX*dimY)*k2 + j*dimX+i];
                e1 = e - Output[index];
                w1 = w - Output[index];
                n1 = n - Output[index];
                s1 = s - Output[index];
                u1 = u - Output[index];
                d1 = d - Output[index];

                if (penaltytype == 1){
                    /* Huber penalty */
                    if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
                    else e1 = e1/sigmaPar;
                    if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
                    else w1 = w1/sigmaPar;
                    if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
                    else n1 = n1/sigmaPar;
                    if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
                    else s1 = s1/sigmaPar;
                    if (fabs(u1) > sigmaPar) u1 = signNDFc(u1);
                    else u1 = u1/sigmaPar;
                    if (fabs(d1) > sigmaPar) d1 = signNDFc(d1);
                    else d1 = d1/sigmaPar;
                }
                else if (penaltytype == 2) {
                    /* Perona-Malik */
                    e1 = (e1)/(1.0f + powf((e1/sigmaPar),2));
                    w1 = (w1)/(1.0f + powf((w1/sigmaPar),2));
                    n1 = (n1)/(1.0f + powf((n1/sigmaPar),2));
                    s1 = (s1)/(1.0f + powf((s1/sigmaPar),2));
                    u1 = (u1)/(1.0f + powf((u1/sigmaPar),2));
                    d1 = (d1)/(1.0f + powf((d1/sigmaPar),2));
                }
                else if (penaltytype == 3) {
                    /* Tukey Biweight */
                    if (fabs(e1) <= sigmaPar) e1 = e1*powf((1.0f - powf((e1/sigmaPar),2)), 2);
                    else e1 = 0.0f;
                    if (fabs(w1) <= sigmaPar) w1 = w1*powf((1.0f - powf((w1/sigmaPar),2)), 2);
                    else w1 = 0.0f;
                    if (fabs(n1) <= sigmaPar) n1 = n1*powf((1.0f - powf((n1/sigmaPar),2)), 2);
                    else n1 = 0.0f;
                    if (fabs(s1) <= sigmaPar) s1 = s1*powf((1.0f - powf((s1/sigmaPar),2)), 2);
                    else s1 = 0.0f;
                    if (fabs(u1) <= sigmaPar) u1 = u1*powf((1.0f - powf((u1/sigmaPar),2)), 2);
                    else u1 = 0.0f;
                    if (fabs(d1) <= sigmaPar) d1 = d1*powf((1.0f - powf((d1/sigmaPar),2)), 2);
                    else d1 = 0.0f;
                }
                else if (penaltytype == 4) {
                    /* Threshold-constrained linear diffusion
                       This means that the linear diffusion will be performed on pixels with
                       absolute difference less than the threshold.
                    */
                    if (fabs(e1) > sigmaPar) e1 = 0.0f;
                    if (fabs(w1) > sigmaPar) w1 = 0.0f;
                    if (fabs(n1) > sigmaPar) n1 = 0.0f;
                    if (fabs(s1) > sigmaPar) s1 = 0.0f;
                    if (fabs(u1) > sigmaPar) u1 = 0.0f;
                    if (fabs(d1) > sigmaPar) d1 = 0.0f;
                }
                else if (penaltytype == 5) {
                    /*
                       Threshold constrained Huber diffusion
                    */
                    if (fabs(e1) <= 2.0f*sigmaPar) {
                        if (fabs(e1) > sigmaPar) e1 = signNDFc(e1);
                        else e1 = e1/sigmaPar; }
                    else e1 = 0.0f;
                    if (fabs(w1) <= 2.0f*sigmaPar) {
                        if (fabs(w1) > sigmaPar) w1 = signNDFc(w1);
                        else w1 = w1/sigmaPar; }
                    else w1 = 0.0f;
                    if (fabs(n1) <= 2.0f*sigmaPar) {
                        if (fabs(n1) > sigmaPar) n1 = signNDFc(n1);
                        else n1 = n1/sigmaPar; }
                    else n1 = 0.0f;
                    if (fabs(s1) <= 2.0f*sigmaPar) {
                        if (fabs(s1) > sigmaPar) s1 = signNDFc(s1);
                        else s1 = s1/sigmaPar; }
                    else s1 = 0.0f;
                    if (fabs(u1) <= 2.0f*sigmaPar) {
                        if (fabs(u1) > sigmaPar) u1 = signNDFc(u1);
                        else u1 = u1/sigmaPar; }
                    else u1 = 0.0f;
                    if (fabs(d1) <= 2.0f*sigmaPar) {
                        if (fabs(d1) > sigmaPar) d1 = signNDFc(d1);
                        else d1 = d1/sigmaPar; }
                    else d1 = 0.0f;
                }
                else {
                    /* NOTE(review): this 'break' only exits the inner i-loop,
                     * so the message repeats per row - TODO confirm intent */
                    printf("%s \n", "No penalty function selected! Use 1,2,3,4 or 5.");
                    break;
                }
                Output[index] += tau*(lambdaPar*(e1 + w1 + n1 + s1 + u1 + d1) - (Output[index] - Input[index]));
            }}}
    return *Output;
}
|
dataracetest3.c | // data race EXISTS
// Minimal OpenMP kernel that intentionally contains a data race (the file
// banner says "data race EXISTS"); used as ground truth for race detectors.
// Keep the loop structure and pragmas exactly as-is: they define the race.
int main() {
double a[10][10];
// The d-loop runs exactly once; it only adds a serial wrapper level.
for(int d=0;d<1;d++) {
#pragma omp parallel for
for(int i=0;i<10;i++) {
for(int j=0;j<10;j++) {
// Each thread zeroes its own row i here ...
a[i][j]=0;
// ... but the k-loop below writes a[j][k], whose indices do not depend on
// the parallel index i: every thread stores to the same 10x10 elements,
// racing both with other threads' a[j][k]=1 stores and with their
// a[i][j]=0 stores above.
#pragma ivdep
#pragma vector always
#pragma simd
for(int k=0;k<10;k++) {
a[j][k]=1;
}
}
}
}
return 0;
}
|
GB_binop__iseq_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc64)
// C=scalar+B GB (_bind1st__iseq_fc64)
// C=scalar+B' GB (_bind1st_tran__iseq_fc64)
// C=A+scalar GB (_bind2nd__iseq_fc64)
// C=A'+scalar GB (_bind2nd_tran__iseq_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_iseq (aij, bij)
// Type and operator configuration consumed by the template files included in
// the kernels below.  C = iseq(A,B) with all three matrices of type
// GxB_FC64_t (double complex).
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (fix: a dangling line-continuation backslash after the 0 spliced the
// following comment line into this macro definition; it has been removed)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (same dangling-backslash fix as GB_A_IS_PATTERN above)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_iseq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FC64 || GxB_NO_ISEQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit): computes C = A+B entrywise with the
// ISEQ operator on GxB_FC64_t where C, A, and B are all dense.  The loop body
// comes from the included template; GB_BINOP (defined above) supplies the
// operator applied to each pair of entries.
void GB (_Cdense_ewise3_noaccum__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: accumulates sparse B into dense C with the ISEQ
// operator.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE, in which
// case the caller falls back to the generic implementation (see the
// GB_DISABLE comment above).
GrB_Info GB (_Cdense_accumB__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: accumulates the scalar *p_bwork (reinterpreted as
// GxB_FC64_t) into every entry of dense C via the ISEQ operator, using the
// included template for the loop.
GrB_Info GB (_Cdense_accumb__iseq_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the block above always returns); harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no colscale kernel is generated for the ISEQ operator
// (listed as "(none)" in the function list at the top of this file), so this
// stub is never compiled.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): no rowscale kernel is generated for the ISEQ operator
// (listed as "(none)" in the function list at the top of this file), so this
// stub is never compiled.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated eWiseAdd kernel for the ISEQ operator on GxB_FC64_t.  The
// actual add logic lives in GB_add_template.c; this wrapper declares the
// per-matrix workspaces the template uses and frees them afterwards via
// GB_FREE_WORKSPACE.
GrB_Info GB (_AaddB__iseq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read (and thus only initialized) for eWiseUnion;
// plain eWiseAdd leaves them unused.
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated eWiseMult kernel ("method 08") for the ISEQ operator; the
// loop structure comes entirely from the included meta file.
GrB_Info GB (_AemultB_08__iseq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated eWiseMult kernel ("method 02").  GB_BINOP_FLIP is defined
// as 0 above, so the #else branch below is the one compiled and the flipxy
// argument is ignored for this operator.
GrB_Info GB (_AemultB_02__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated eWiseMult kernel ("method 04") for the ISEQ operator; the
// loop body lives in the included template.
GrB_Info GB (_AemultB_04__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated eWiseMult kernel producing a bitmap C with the ISEQ
// operator; the loop body lives in the included template.
GrB_Info GB (_AemultB_bitmap__iseq_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated bind1st kernel: Cx [p] = iseq (x, Bx [p]) for every entry p
// present in B.  Entries absent from the bitmap Bb are skipped; GBB/GBX
// handle the bitmap and iso access patterns.
GrB_Info GB (_bind1st__iseq_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
// static schedule: each entry is independent, so the loop parallelizes
// trivially over nthreads
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_iseq (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Auto-generated bind2nd kernel: Cx [p] = iseq (Ax [p], y) for every entry p
// present in A.  Entries absent from the bitmap Ab are skipped; GBB/GBX
// handle the bitmap and iso access patterns.
GrB_Info GB (_bind2nd__iseq_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
// static schedule: each entry is independent, so the loop parallelizes
// trivially over nthreads
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_iseq (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the transpose template below computes
// iseq (x, aij) with the scalar bound to the first argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_iseq (x, aij) ; \
}
// Auto-generated kernel: C = iseq (x, A'), i.e. transpose A while applying
// the operator with scalar x as the first operand.
GrB_Info GB (_bind1st_tran__iseq_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (same value for this
// type combination, redefined for generator uniformity)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is redefined here so the transpose template below computes
// iseq (aij, y) with the scalar bound to the second argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_iseq (aij, y) ; \
}
// Auto-generated kernel: C = iseq (A', y), i.e. transpose A while applying
// the operator with scalar y as the second operand.
GrB_Info GB (_bind2nd_tran__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * NOTE: Y is normalized in place as a side effect (classic glibc-manual
 * idiom); callers must not rely on *y afterwards.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_sec += borrow;
      y->tv_usec -= 1000000 * borrow;
    }
  /* Carry excess microseconds (more than a full second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_sec -= carry;
      y->tv_usec += 1000000 * carry;
    }
  /* With y normalized, the component-wise difference is the answer and
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* The difference is negative exactly when x's (adjusted) seconds are
   * smaller than y's. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs the time-tiled, order-2, 3D 25-point stencil TESTS
 * times on an (Nx+8) x (Ny+8) x (Nz+8) grid (4-deep halo on each side) for
 * Nt time steps and reports the wall-clock time of each run.
 *
 * Usage: prog Nx Ny Nz Nt  (all four arguments are required).
 */
int main(int argc, char *argv[])
{
  int i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* All four size arguments are required.  (The original code read them only
   * when present and otherwise used Nx..Nt uninitialized, which is UB.) */
  if (argc <= 4) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1]) + 8;   /* +8 = 4-point halo on each side */
  Ny = atoi(argv[2]) + 8;
  Nz = atoi(argv[3]) + 8;
  Nt = atoi(argv[4]);

  /* Two time planes A[0]/A[1] plus the coefficient field roc2.
   * (The original code leaked a one-element malloc into roc2 that was
   * immediately overwritten by the real allocation.) */
  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  double ***roc2 = (double ***) malloc(sizeof(double **) * Nz);
  if (A == NULL || roc2 == NULL) {
    fprintf(stderr, "allocation failure\n");
    return 1;
  }
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    roc2[i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
      roc2[i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  /* Tile size list, terminated by -1; rewritten here before the
   * source-to-source transformations. */
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  {
    /* realloc through a temporary so the original pointer is not lost (and
     * leaked) on failure */
    int *tmp = (int *) realloc((void *) tile_size, sizeof(int) * 5);
    if (tmp == NULL) {
      free(tile_size);
      fprintf(stderr, "allocation failure\n");
      return 1;
    }
    tile_size = tmp;
  }
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 16;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  /* Initialize BOTH time planes and roc2 over the whole grid including the
   * halo (indices 0..N-1).  The original loops started at index 1 and never
   * touched A[1], so the first stencil sweep read uninitialized memory. */
  const int BASE = 1024;
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1; /* reported by PRINT_RESULTS below */
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Radius-4 stencil coefficients (center + 4 shells). */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* Time-tiled sweep generated by PLUTO/CLooG; kept verbatim below.
     * Per point: Addition: 6 && Multiplication: 2 (generator comment). */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(16*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(8*t1+Ny+7,16)),floord(16*t2+Ny+3,16)),floord(16*t1-16*t2+Nz+Ny+5,16));t3++) {
            for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2035,2048)),ceild(16*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(8*t1+Nx+7,2048)),floord(16*t2+Nx+3,2048)),floord(16*t3+Nx+3,2048)),floord(16*t1-16*t2+Nz+Nx+5,2048));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),4*t3+2),512*t4+510);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
                    lbv=max(2048*t4,4*t5+4);
                    ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return; /* negative-difference flag intentionally unused */

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (the original leaked tile_size and A itself). */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);
  free(tile_size);
  return 0;
}
|
cell_division_gpu.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#define SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#include <array>
#include "biodynamo.h"
#include "core/param/command_line_options.h"
#include "core/util/math.h"
#include "core/util/timing.h"
namespace bdm {
// ----------------------------------------------------------------------------
// Starting with 8 cells, we let each cell grow in volume up until a point
// a cell must divide. This tests whether the GPU accelerated mechanical
// interactions properly handle the creation of new cells.
// -----------------------------------------------------------------------------
// Compares two 3-vectors component-wise.  Any component differing by more
// than 1e-9 sets *wrong and prints a diagnostic to stdout; matching
// components leave *wrong untouched.
inline void ExpectArrayNear(const Double3& actual, const Double3& expected,
                            bool* wrong) {
  for (size_t i = 0; i < actual.size(); i++) {
    const double delta = std::fabs(expected[i] - actual[i]);
    if (delta <= 1e-9) {
      continue;  // this component is within tolerance
    }
    *wrong = true;
    std::cout << "Wrong result! Expected " << expected[i]
              << ", but instead got " << actual[i]
              << ", which is a difference of " << delta
              << ", which is larger than 1e-9" << std::endl;
  }
}
// Runs one cell-division simulation on the selected compute target (CPU,
// OpenCL, or CUDA).  Builds a cells_per_dim^3 grid of cells spaced 20 units
// apart, each with a GrowthDivision behavior, simulates `timesteps` steps,
// and reports the wall-clock time via Timing.
// NOTE(review): `wrong` is currently never written here -- result
// verification is still a TODO (see end of function).
inline void RunTest(bool* wrong, OpComputeTarget mode, uint64_t timesteps,
uint64_t cells_per_dim) {
std::cout << "Running simulation on ";
// Select the backend by setting param->compute_target before the Simulation
// is constructed.
auto set_param = [&](auto* param) {
switch (mode) {
case kCpu:
std::cout << "CPU (" << omp_get_max_threads() << " threads)\n";
break;
case kOpenCl:
std::cout << "GPU (OpenCL)\n";
param->compute_target = "opencl";
break;
case kCuda:
std::cout << "GPU (CUDA)\n";
param->compute_target = "cuda";
break;
}
};
Simulation simulation("cell_division_gpu", set_param);
auto* rm = simulation.GetResourceManager();
rm->ClearAgents();
// We need to give every test the same seed for the RNG, because in the cell
// division, random numbers are used. Within a single executable these numbers
// vary. Also within the threads this needs to be enforced
#pragma omp parallel
simulation.GetRandom()->SetSeed(1);
// Factory for one cell at the given position; parameters match the other
// backends so the runs are comparable.
auto construct = [](const Double3& position) {
auto* cell = new Cell(position);
cell->SetDiameter(30);
cell->SetAdherence(0.4);
cell->SetMass(1.0);
cell->AddBehavior(new GrowthDivision(30.05, 5000));
return cell;
};
// Regular cells_per_dim^3 lattice with 20-unit spacing.
for (size_t x = 0; x < cells_per_dim; x++) {
double x_pos = x * 20.0;
for (size_t y = 0; y < cells_per_dim; y++) {
double y_pos = y * 20.0;
for (size_t z = 0; z < cells_per_dim; z++) {
auto new_simulation_object = construct({x_pos, y_pos, z * 20.0});
rm->AddAgent(new_simulation_object);
}
}
}
// Scoped so the Timing destructor reports elapsed time right after Simulate.
{
Timing timer("Execution time");
simulation.GetScheduler()->Simulate(timesteps);
}
// TODO: add verification of results
}
// Entry point for the cell-division benchmark.  Parses command-line options
// and runs the simulation on the CPU and/or whichever GPU backends (CUDA,
// OpenCL) were compiled in and requested.
inline int Simulate(int argc, const char** argv) {
auto options = CommandLineOptions(argc, argv);
options.AddOption<bool>("verify", "false");
options.AddOption<uint64_t>("cells-per-dim", "64");
options.AddOption<uint64_t>("timesteps", "5");
uint64_t cells_per_dim = options.Get<uint64_t>("cells-per-dim");
uint64_t timesteps = options.Get<uint64_t>("timesteps");
// NOTE(review): `wrong` starts out true and nothing in the visible code
// ever writes it (RunTest's verification is a TODO), so this function
// currently always returns !wrong == 0, i.e. success.
bool wrong = true;
// "opencl"/"cuda" flags are read without an AddOption call here --
// presumably registered by CommandLineOptions itself; confirm upstream.
bool is_opencl = options.Get<bool>("opencl");
bool is_cuda = options.Get<bool>("cuda");
// TODO(ahmad): after Trello card ("Fix inconsistency in cell state due to
// direct updates in Biology Modules") enable multithreading, and adjust
// results if necessary
// omp_set_num_threads(1);
if (!is_cuda && !is_opencl) {
// Run CPU version
RunTest(&wrong, kCpu, timesteps, cells_per_dim);
}
#ifdef USE_CUDA
if (is_cuda) {
// Run GPU (CUDA) version
RunTest(&wrong, kCuda, timesteps, cells_per_dim);
}
#endif // USE_CUDA
#ifdef USE_OPENCL
if (is_opencl) {
// Run GPU (OpenCL) version
RunTest(&wrong, kOpenCl, timesteps, cells_per_dim);
}
#endif // USE_OPENCL
return !wrong;
}
} // namespace bdm
#endif // SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
|
cvAdvDiff_kry_ompdev.c | /* -------------------------------------------------------------------
* Programmer(s): Shelby Lockhart @ LLNL
* -------------------------------------------------------------------
* Acknowledgements: This example is based on cvAdvDiff_kry example
* by Slaven Peles which is based on cvAdvDiff_bnd
* example by Scott D. Cohen, Alan C.
* Hindmarsh and Radu Serban @ LLNL
* -------------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -------------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* with the program for its solution by CVODE.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the CVBAND band linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */
#include <sundials/sundials_types.h> /* definition of type realtype */
#include <sundials/sundials_math.h> /* definition of ABS and EXP */
#include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Real Constants */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
/* Type : _UserData (contains model and discretization parameters) */
typedef struct {
/* MX, MY: interior mesh dimensions; NEQ = MX*MY is the size of the ODE
system (see the problem description at the top of this file). */
sunindextype MX, MY, NEQ;
/* dx, dy: mesh spacings; XMAX, YMAX: domain extents (the problem is posed
on 0 <= x <= 2, 0 <= y <= 1) -- presumably filled in by SetUserData;
confirm there. */
realtype dx, dy, XMAX, YMAX;
/* PDE coefficients: names suggest horizontal diffusion (u_xx), horizontal
advection (.5 u_x), and vertical diffusion (u_yy) terms of the equation
du/dt = u_xx + .5 u_x + u_yy -- TODO confirm against SetUserData. */
realtype hdcoef, hacoef, vdcoef;
} *UserData;
/*typedef _UserData *UserData;*/
/* Problem setup and initialization functions */
static UserData SetUserData(int argc, char** argv);
static void SetIC(N_Vector u, UserData data);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int jtv(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *user_data, N_Vector tmp);
/* Private Helper Functions */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
/*
 * Driver: sets up the 2-D advection-diffusion problem, integrates it with
 * CVODE (BDF + SPGMR, no preconditioning) on an OpenMPDEV vector, prints
 * the max-norm of the solution at each output time, and releases all
 * solver resources.
 *
 * Fixes vs. the original:
 *  - the N_VNew_OpenMPDEV failure message named "N_VNew_Cuda";
 *  - the SUNLinSol_SPGMR result was validated through an uninitialized
 *    integer flag instead of the returned pointer;
 *  - the linear solver LS was never freed (SUNLinSolFree added).
 */
int main(int argc, char** argv)
{
  realtype reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, flag;
  long int nst;

  u = NULL;
  data = NULL;
  LS = NULL;
  cvode_mem = NULL;

  /* Set model parameters */
  data = SetUserData(argc, argv);
  if(check_flag((void *)data, "malloc", 2)) return(1);

  reltol = ZERO; /* Set the tolerances */
  abstol = ATOL;

  /* Create an OpenMPDEV vector with initial values */
  u = N_VNew_OpenMPDEV(data->NEQ); /* Allocate u vector */
  if(check_flag((void*)u, "N_VNew_OpenMPDEV", 0)) return(1); /* fixed: message named the wrong constructor */
  SetIC(u, data); /* Initialize u vector */

  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula and the use of a Newton iteration */
  cvode_mem = CVodeCreate(CV_BDF);
  if(check_flag((void *)cvode_mem, "CVodeCreate", 0)) return(1);

  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the initial time T0, and
   * the initial dependent variable vector u. */
  flag = CVodeInit(cvode_mem, f, T0, u);
  if(check_flag(&flag, "CVodeInit", 1)) return(1);

  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  flag = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_flag(&flag, "CVodeSStolerances", 1)) return(1);

  /* Set the pointer to user-defined data */
  flag = CVodeSetUserData(cvode_mem, data);
  if(check_flag(&flag, "CVodeSetUserData", 1)) return(1);

  /* Create SPGMR solver without preconditioning
   * and the default maximum Krylov dimension (0) */
  LS = SUNLinSol_SPGMR(u, PREC_NONE, 0);
  if(check_flag((void *)LS, "SUNLinSol_SPGMR", 0)) return(1); /* fixed: was testing an uninitialized flag */

  /* Attach the linear solver */
  flag = CVodeSetLinearSolver(cvode_mem, LS, NULL);
  if(check_flag(&flag, "CVodeSetLinearSolver", 1)) return(1);

  /* Set the Jacobian-times-vector function */
  flag = CVodeSetJacTimes(cvode_mem, NULL, jtv);
  if(check_flag(&flag, "CVodeSetJacTimesVecFn", 1)) return(1);

  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax, data);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    flag = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_flag(&flag, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    flag = CVodeGetNumSteps(cvode_mem, &nst);
    check_flag(&flag, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }

  PrintFinalStats(cvode_mem); /* Print some final statistics */

  N_VDestroy(u);         /* Free the u vector */
  SUNLinSolFree(LS);     /* Free the linear solver memory (was leaked) */
  CVodeFree(&cvode_mem); /* Free the integrator memory */
  free(data);            /* Free the user data */
  return(0);
}
/*
*-------------------------------------------
* Problem setup and initialization functions
*-------------------------------------------
*/
/* Set model and discretization parameters */
/*
 * Allocate and fill the UserData structure: mesh dimensions, problem size,
 * domain bounds, grid spacing, and the finite-difference coefficients for
 * the diffusion and advection terms. argc/argv are currently unused (the
 * sizes are fixed constants). Returns NULL if the allocation fails; the
 * caller releases the structure with free().
 * Fix: the definition now carries `static`, matching its forward declaration.
 */
static UserData SetUserData(int argc, char *argv[])
{
  const sunindextype MX = 10;
  const sunindextype MY = 5;
  const realtype XMAX = RCONST(2.0); /* domain boundaries */
  const realtype YMAX = RCONST(1.0);

  /* Allocate user data structure */
  UserData ud = (UserData) malloc(sizeof *ud);
  if(check_flag((void*) ud, "AllocUserData", 2)) return(NULL);

  ud->MX = MX;
  ud->MY = MY;
  ud->NEQ = MX*MY; /* total number of unknowns */
  ud->XMAX = XMAX;
  ud->YMAX = YMAX;
  ud->dx = XMAX/(MX+1); /* Set grid coefficients in data */
  ud->dy = YMAX/(MY+1);
  ud->hdcoef = ONE/(ud->dx*ud->dx); /* horizontal diffusion */
  ud->hacoef = HALF/(TWO*ud->dx);   /* horizontal advection */
  ud->vdcoef = ONE/(ud->dy*ud->dy); /* vertical diffusion */
  return ud;
}
/* Set initial conditions in u vector */
/*
 * Load the initial profile u(x,y) = x (XMAX - x) y (YMAX - y) exp(5xy)
 * into u. Values are written through the host array pointer and then
 * copied to the device so device-side kernels see the initial state.
 * Grid point (i,j) maps to the linear index tid = i*MY + j.
 */
static void SetIC(N_Vector u, UserData data)
{
  /* Extract needed constants from data */
  const realtype dx = data->dx;
  const realtype dy = data->dy;
  const realtype xmax = data->XMAX;
  const realtype ymax = data->YMAX;
  const sunindextype MY = data->MY;
  const sunindextype NEQ = data->NEQ;

  /* Extract pointer to solution vector data on the host */
  realtype *udata = N_VGetHostArrayPointer_OpenMPDEV(u);

  sunindextype i, j, tid;
  realtype x, y;

  /* Load initial profile into u vector */
  for (tid=0; tid < NEQ; tid++) {
    i = tid / MY;  /* x-index */
    j = tid % MY;  /* y-index */
    x = (i+1)*dx;  /* interior point coordinates (boundary excluded) */
    y = (j+1)*dy;
    udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y);
  }

  /* Mirror the freshly written host data onto the device */
  N_VCopyToDevice_OpenMPDEV(u);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u). */
/*
 * Right-hand side f(t,u) of the semi-discrete advection-diffusion system,
 * evaluated on the device via OpenMP target offload. Linear index k maps
 * to grid point (i,j) = (k/MY, k%MY); out-of-range neighbors contribute
 * ZERO (homogeneous Dirichlet boundary).
 *
 * Fix: the per-point work variables (i, j, uij, udn, uup, ult, urt, hdiff,
 * hadv, vdiff) were declared at function scope and therefore shared among
 * the device threads of the parallel region — a data race. They are now
 * declared inside the loop body, making each iteration's copies private.
 */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
  sunindextype k;
  int dev;
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract pointers to vector data */
  const realtype *udata = N_VGetDeviceArrayPointer_OpenMPDEV(u);
  realtype *dudata = N_VGetDeviceArrayPointer_OpenMPDEV(udot);

  /* Get device */
  dev = omp_get_default_device();

  /* Loop over all grid points. */
#pragma omp target map(to:MY,MX,hordc,horac,verdc) is_device_ptr(udata, dudata) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k=0; k<MY*MX; k++) {
    const sunindextype i = k/MY;
    const sunindextype j = k%MY;

    /* Current point and its four neighbors (ZERO on the boundary) */
    const realtype uij = udata[k];
    const realtype udn = (j == 0)    ? ZERO : udata[k - 1];
    const realtype uup = (j == MY-1) ? ZERO : udata[k + 1];
    const realtype ult = (i == 0)    ? ZERO : udata[k - MY];
    const realtype urt = (i == MX-1) ? ZERO : udata[k + MY];

    /* Set diffusion and advection terms and load into udot */
    const realtype hdiff = hordc * (ult - TWO * uij + urt);
    const realtype hadv  = horac * (urt - ult);
    const realtype vdiff = verdc * (uup - TWO * uij + udn);
    dudata[k] = hdiff + hadv + vdiff;
  }
  return(0);
}
/* Jacobian-times-vector routine. */
/*
 * Jacobian-times-vector product Jv for the constant-coefficient
 * advection-diffusion operator, computed directly from the stencil
 * coefficients on the device. u, fu, t and tmp are unused because the
 * Jacobian is constant.
 *
 * Fix: the work variables i and j were declared at function scope and
 * therefore shared among device threads of the parallel region (data
 * race); they are now loop-local and thus private per iteration.
 */
static int jtv(N_Vector v, N_Vector Jv, realtype t,
               N_Vector u, N_Vector fu,
               void *user_data, N_Vector tmp)
{
  sunindextype k;
  int dev;
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract pointers to vector data */
  const realtype *vdata = N_VGetDeviceArrayPointer_OpenMPDEV(v);
  realtype *Jvdata = N_VGetDeviceArrayPointer_OpenMPDEV(Jv);

  N_VConst(ZERO, Jv); /* note: every entry is overwritten below anyway */

  /* Get device */
  dev = omp_get_default_device();

#pragma omp target map(to:MX,MY,hordc,horac,verdc) is_device_ptr(vdata,Jvdata) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k=0; k<MX*MY; k++) {
    const sunindextype i = k/MY;
    const sunindextype j = k%MY;
    /* set the kth element of Jv: diagonal plus up to four neighbors */
    Jvdata[k] = -TWO * (verdc + hordc) * vdata[k];
    if (i != 0) Jvdata[k] += (hordc - horac) * vdata[k-MY];
    if (i != MX-1) Jvdata[k] += (hordc + horac) * vdata[k+MY];
    if (j != 0) Jvdata[k] += verdc * vdata[k-1];
    if (j != MY-1) Jvdata[k] += verdc * vdata[k+1];
  }
  return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Print first lines of output (problem description) */
/*
 * Print the problem banner: mesh dimensions, total system size, the
 * tolerances in use, and the initial max-norm of u. The printf format is
 * chosen per the realtype precision SUNDIALS was configured with.
 */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %ld X %ld\n", (long int) data->MX, (long int) data->MY);
  printf("Total system size = %ld\n", (long int) data->NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n",
         reltol, abstol);
  printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n",
         reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
  /* single precision: plain %g / %e also fit */
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif
  return;
}
/* Print current value */
/*
 * Report one output point: time, max-norm of the solution, and the
 * cumulative CVODE step count. Extended precision needs the long-double
 * printf length modifiers; double and single precision share a format.
 */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#else
  printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
}
/* Get and print some final statistics */
/*
 * Query CVODE and its attached linear solver for workspace sizes and
 * cumulative counters (steps, RHS evaluations, nonlinear/linear iterations,
 * convergence failures, etc.) and print a summary table. Each getter's
 * status is passed through check_flag so failures are reported but do not
 * abort the summary.
 */
static void PrintFinalStats(void *cvode_mem)
{
  long lenrw, leniw ;      /* integrator workspace sizes */
  long lenrwLS, leniwLS;   /* linear-solver workspace sizes */
  long int nst, nfe, nsetups, nni, ncfn, netf;
  long int nli, npe, nps, ncfl, nfeLS;
  int flag;

  flag = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw);
  check_flag(&flag, "CVodeGetWorkSpace", 1);
  flag = CVodeGetNumSteps(cvode_mem, &nst);
  check_flag(&flag, "CVodeGetNumSteps", 1);
  flag = CVodeGetNumRhsEvals(cvode_mem, &nfe);
  check_flag(&flag, "CVodeGetNumRhsEvals", 1);
  flag = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
  check_flag(&flag, "CVodeGetNumLinSolvSetups", 1);
  flag = CVodeGetNumErrTestFails(cvode_mem, &netf);
  check_flag(&flag, "CVodeGetNumErrTestFails", 1);
  flag = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
  check_flag(&flag, "CVodeGetNumNonlinSolvIters", 1);
  flag = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
  check_flag(&flag, "CVodeGetNumNonlinSolvConvFails", 1);

  /* Linear-solver (SPGMR) statistics */
  flag = CVodeGetLinWorkSpace(cvode_mem, &lenrwLS, &leniwLS);
  check_flag(&flag, "CVodeGetLinWorkSpace", 1);
  flag = CVodeGetNumLinIters(cvode_mem, &nli);
  check_flag(&flag, "CVodeGetNumLinIters", 1);
  flag = CVodeGetNumPrecEvals(cvode_mem, &npe);
  check_flag(&flag, "CVodeGetNumPrecEvals", 1);
  flag = CVodeGetNumPrecSolves(cvode_mem, &nps);
  check_flag(&flag, "CVodeGetNumPrecSolves", 1);
  flag = CVodeGetNumLinConvFails(cvode_mem, &ncfl);
  check_flag(&flag, "CVodeGetNumLinConvFails", 1);
  flag = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS);
  check_flag(&flag, "CVodeGetNumLinRhsEvals", 1);

  printf("\nFinal Statistics.. \n\n");
  printf("lenrw = %5ld leniw = %5ld\n", lenrw, leniw);
  printf("lenrwLS = %5ld leniwLS = %5ld\n", lenrwLS, leniwLS);
  printf("nst = %5ld\n" , nst);
  printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS);
  printf("nni = %5ld nli = %5ld\n" , nni, nli);
  printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf);
  printf("npe = %5ld nps = %5ld\n" , npe, nps);
  printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl);
  return;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
/*
 * Validate a SUNDIALS return value.
 *   opt == 0 : flagvalue is a pointer returned by an allocating SUNDIALS
 *              call; error if NULL.
 *   opt == 1 : flagvalue points to an int status flag; error if *flag < 0.
 *   opt == 2 : flagvalue is a pointer from a plain allocation; error if NULL.
 * Returns 1 on error (after printing a message to stderr), 0 otherwise.
 */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  int *errflag;

  switch (opt) {
    case 0: /* SUNDIALS allocator returned NULL? */
      if (flagvalue == NULL) {
        fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
                funcname);
        return(1);
      }
      break;
    case 1: /* SUNDIALS call returned a negative flag? */
      errflag = (int *) flagvalue;
      if (*errflag < 0) {
        fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
                funcname, *errflag);
        return(1);
      }
      break;
    case 2: /* plain allocation returned NULL? */
      if (flagvalue == NULL) {
        fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
                funcname);
        return(1);
      }
      break;
    default:
      break;
  }
  return(0);
}
|
GB_unaryop__abs_uint16_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint16_int32
// op(A') function: GB_tran__abs_uint16_int32
// C type: uint16_t
// A type: int32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
 * Cx = op (cast (Ax)): apply the ABS operator to all anz entries of Ax in
 * parallel. Per the file header, the cast is int32 -> uint16 and the unary
 * op itself is the identity (cij = aij), so the work is purely the
 * typecast. Returns GrB_NO_VALUE when this kernel is compiled out via
 * GB_DISABLE (GxB_NO_ABS / GxB_NO_UINT16 / GxB_NO_INT32).
 */
GrB_Info GB_unop__abs_uint16_int32
(
    uint16_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        /* Cx [p] = (uint16_t) Ax [p] — expanded via GB_CAST_OP */
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
 * C = op (cast (A')): transpose A while typecasting int32 -> uint16 and
 * applying the unary op, by textually including the shared transpose
 * template (phase 2 of 2) which expands the GB_* macros defined above.
 * Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
 */
GrB_Info GB_tran__abs_uint16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
pool_functors.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/platform/threadpool.h"
#include "core/providers/cpu/nn/pool_base.h"
namespace onnxruntime {
// 1-D pooling task. The reduction (initialize/process/finalize) is supplied
// by the PoolType policy; each call to operator()(c) pools one slice of
// x_step input / y_step output elements. c presumably indexes N*C image
// planes — TODO confirm with the caller.
template <typename T, typename PoolType>
struct Pool1DTask final {
  const T* X_data;
  T* Y_data;
  int64_t x_step;         // input elements per slice
  int64_t y_step;         // output elements per slice
  int64_t pooled_height;
  int64_t stride_h;
  int64_t height;
  const std::vector<int64_t>& kernel_shape;
  const std::vector<int64_t>& pads;
  const PoolProcessContext& pool_context_;
  const PoolAttributes& pool_attrs_;
  // Per-slice cost estimate handed to the thread-pool scheduler.
  TensorOpCost Cost() {
    double loop_count = static_cast<double>(pooled_height * kernel_shape[0]);
    return TensorOpCost{loop_count, loop_count, loop_count};
  }
  // Pool slices [begin, end); slices are independent of each other.
  void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int64_t c = begin; c < end; ++c) {
      operator()(c);
    }
  }
  // Pool a single slice c.
  void operator()(std::ptrdiff_t c) const {
    const T* x_d = X_data + c * x_step;
    T* y_d = Y_data + c * y_step;
    for (int64_t ph = 0; ph < pooled_height; ++ph) {
      int64_t hstart = ph * stride_h - pads[0];
      int64_t hend = std::min(hstart + kernel_shape[0], height);
      hstart = std::max(hstart, static_cast<int64_t>(0));  // clamp window to input
      T Yh = PoolType::Initialize();
      for (int64_t h = hstart; h < hend; ++h) {
        PoolType::Process(x_d[h], Yh, pool_context_);
      }
      // Divisor counts padded positions only if count_include_pad is set.
      if (pool_attrs_.count_include_pad) {
        PoolType::Finalize(kernel_shape[0], Yh, pool_context_);
      } else {
        PoolType::Finalize(hend - hstart, Yh, pool_context_);
      }
      y_d[ph] = Yh;
    }
  }
};
// 2-D pooling task; same policy-based structure as Pool1DTask with an
// extra width dimension. Input rows are laid out contiguously
// (input_index = h * width + w).
template <typename T, typename PoolType>
struct Pool2DTask final {
  const T* X_data;
  T* Y_data;
  int64_t x_step;         // input elements per slice
  int64_t y_step;         // output elements per slice
  int64_t pooled_height;
  int64_t pooled_width;
  int64_t stride_h;
  int64_t stride_w;
  int64_t height;
  int64_t width;
  const std::vector<int64_t>& kernel_shape;
  const std::vector<int64_t>& pads;
  const PoolProcessContext& pool_context_;
  const PoolAttributes& pool_attrs_;
  // Per-slice cost estimate handed to the thread-pool scheduler.
  TensorOpCost Cost() {
    double loop_count = static_cast<double>(pooled_height * pooled_width * kernel_shape[0] * kernel_shape[1]);
    return TensorOpCost{loop_count, loop_count, loop_count};
  }
  // Pool slices [begin, end); slices are independent of each other.
  void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int64_t c = begin; c < end; ++c) {
      operator()(c);
    }
  }
  // Pool a single slice c.
  void operator()(std::ptrdiff_t c) const {
    const T* x_d = X_data + c * x_step;
    T* y_d = Y_data + c * y_step;
    for (int64_t ph = 0; ph < pooled_height; ++ph) {
      int64_t hstart = ph * stride_h - pads[0];
      int64_t hend = std::min(hstart + kernel_shape[0], height);
      hstart = std::max(hstart, static_cast<int64_t>(0));  // clamp window to input
      for (int64_t pw = 0; pw < pooled_width; ++pw) {
        int64_t wstart = pw * stride_w - pads[1];
        int64_t wend = std::min(wstart + kernel_shape[1], width);
        wstart = std::max(wstart, static_cast<int64_t>(0));
        const int64_t pool_index = ph * pooled_width + pw;
        T Yh = PoolType::Initialize();
        for (int64_t h = hstart; h < hend; ++h) {
          for (int64_t w = wstart; w < wend; ++w) {
            const int64_t input_index = h * width + w;
            PoolType::Process(x_d[input_index], Yh, pool_context_);
          }
        }
        // Divisor counts padded positions only if count_include_pad is set.
        if (pool_attrs_.count_include_pad) {
          PoolType::Finalize(kernel_shape[0] * kernel_shape[1], Yh, pool_context_);
        } else {
          PoolType::Finalize((hend - hstart) * (wend - wstart), Yh, pool_context_);
        }
        y_d[pool_index] = Yh;
      }
    }
  }
};
// 3-D pooling task; policy-based reduction over height x width x depth
// windows. Input layout: input_index = h * width * depth + w * depth + d.
template <typename T, typename PoolType>
struct Pool3DTask final {
  const T* X_data;
  T* Y_data;
  int64_t x_step;         // input elements per slice
  int64_t y_step;         // output elements per slice
  int64_t pooled_height;
  int64_t pooled_width;
  int64_t pooled_depth;
  int64_t stride_h;
  int64_t stride_w;
  int64_t stride_d;
  int64_t height;
  int64_t width;
  int64_t depth;
  const std::vector<int64_t>& kernel_shape;
  const std::vector<int64_t>& pads;
  const PoolProcessContext& pool_context_;
  const PoolAttributes& pool_attrs_;
  // Per-slice cost estimate handed to the thread-pool scheduler.
  TensorOpCost Cost() {
    double loop_count = static_cast<double>(pooled_height * pooled_width * pooled_depth * kernel_shape[0] *
                                            kernel_shape[1] * kernel_shape[2]);
    return TensorOpCost{loop_count, loop_count, loop_count};
  }
  // Pool slices [begin, end); slices are independent of each other.
  void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int64_t c = begin; c < end; ++c) {
      operator()(c);
    }
  }
  // Pool a single slice c.
  void operator()(std::ptrdiff_t c) const {
    const T* x_d = X_data + c * x_step;
    T* y_d = Y_data + c * y_step;
    for (int64_t ph = 0; ph < pooled_height; ++ph) {
      int64_t hstart = ph * stride_h - pads[0];
      int64_t hend = std::min(hstart + kernel_shape[0], height);
      hstart = std::max(hstart, static_cast<int64_t>(0));  // clamp window to input
      for (int64_t pw = 0; pw < pooled_width; ++pw) {
        int64_t wstart = pw * stride_w - pads[1];
        int64_t wend = std::min(wstart + kernel_shape[1], width);
        wstart = std::max(wstart, static_cast<int64_t>(0));
        for (int64_t pd = 0; pd < pooled_depth; ++pd) {
          int64_t dstart = pd * stride_d - pads[2];
          int64_t dend = std::min(dstart + kernel_shape[2], depth);
          dstart = std::max(dstart, static_cast<int64_t>(0));
          const int64_t pool_index = ph * pooled_width * pooled_depth + pw * pooled_depth + pd;
          T Yh = PoolType::Initialize();
          for (int64_t h = hstart; h < hend; ++h) {
            for (int64_t w = wstart; w < wend; ++w) {
              for (int64_t d = dstart; d < dend; ++d) {
                const int64_t input_index = h * width * depth + w * depth + d;
                PoolType::Process(x_d[input_index], Yh, pool_context_);
              }
            }
          }
          // Divisor counts padded positions only if count_include_pad is set.
          if (pool_attrs_.count_include_pad) {
            PoolType::Finalize(kernel_shape[0] * kernel_shape[1] * kernel_shape[2], Yh, pool_context_);
          } else {
            PoolType::Finalize((hend - hstart) * (wend - wstart) * (dend - dstart), Yh, pool_context_);
          }
          y_d[pool_index] = Yh;
        }
      }
    }
  }
};
// 1-D max pooling with dilation support. Optionally records the argmax
// index of each window into I_data (flattened input offsets).
// NOTE(review): if a window contains no valid (in-bounds) element, Yh
// stays at lowest() and the stored index is c*x_step - 1 — presumably the
// caller guarantees at least one valid position; confirm.
template <typename T>
struct MaxPool1DTask final {
  const T* X_data;
  T* Y_data;
  int64_t* I_data;        // optional argmax output; may be nullptr
  int64_t x_step;
  int64_t y_step;
  int64_t dilation_h;
  int64_t pooled_height;
  int64_t stride_h;
  int64_t height;
  const std::vector<int64_t>& kernel_shape;
  const std::vector<int64_t>& pads;
  // Per-slice cost estimate handed to the thread-pool scheduler.
  TensorOpCost Cost() {
    double loop_count = static_cast<double>(pooled_height * kernel_shape[0]);
    return TensorOpCost{loop_count, loop_count, loop_count};
  }
  // Pool slices [begin, end); slices are independent of each other.
  void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int64_t c = begin; c < end; ++c) {
      operator()(c);
    }
  }
  // Pool a single slice c.
  void operator()(std::ptrdiff_t c) const {
    const T* x_d = X_data + c * x_step;
    T* y_d = Y_data + c * y_step;
    int64_t* i_d = I_data ? I_data + c * y_step : nullptr;
    for (int64_t ph = 0; ph < pooled_height; ++ph) {
      int64_t hstart = ph * stride_h - pads[0];
      int64_t hend = hstart + kernel_shape[0] * dilation_h;
      T Yh = std::numeric_limits<T>::lowest();
      int64_t h_index = -1;
      for (int64_t h = hstart; h < hend; h += dilation_h) {
        // skip positions falling into the padding
        if (math::is_a_ge_zero_and_a_lt_b(h, height)) {
          if (x_d[h] > Yh) {
            Yh = x_d[h];
            h_index = h;
          }
        }
      }
      y_d[ph] = Yh;
      if (i_d != nullptr)
        i_d[ph] = c * x_step + h_index;
    }
  }
};
// 2-D max pooling with dilation support. Optionally records argmax indices
// into I_data; storage_order selects row-major (0) or column-major
// flattening of the recorded index.
// NOTE(review): an all-padding window leaves h_index/w_index at -1; the
// stored index is then negative — presumably callers prevent this; confirm.
template <typename T>
struct MaxPool2DTask final {
  const T* X_data;
  T* Y_data;
  int64_t* I_data;        // optional argmax output; may be nullptr
  int64_t x_step;
  int64_t y_step;
  int64_t dilation_h;
  int64_t dilation_w;
  int64_t pooled_height;
  int64_t pooled_width;
  int64_t stride_h;
  int64_t stride_w;
  int64_t height;
  int64_t width;
  const std::vector<int64_t>& kernel_shape;
  const std::vector<int64_t>& pads;
  int64_t storage_order;  // 0 = row major, otherwise column major
  // Per-slice cost estimate handed to the thread-pool scheduler.
  TensorOpCost Cost() {
    double loop_count = static_cast<double>(pooled_height * pooled_width * kernel_shape[0] * kernel_shape[1]);
    return TensorOpCost{loop_count, loop_count, loop_count};
  }
  // Pool slices [begin, end); slices are independent of each other.
  void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int64_t c = begin; c < end; ++c) {
      operator()(c);
    }
  }
  // Pool a single slice c.
  void operator()(std::ptrdiff_t c) const {
    const T* x_d = X_data + c * x_step;
    T* y_d = Y_data + c * y_step;
    int64_t* i_d = I_data ? I_data + c * y_step : nullptr;
    for (int64_t ph = 0; ph < pooled_height; ++ph) {
      int64_t hstart = ph * stride_h - pads[0];
      int64_t hend = hstart + kernel_shape[0] * dilation_h;
      for (int64_t pw = 0; pw < pooled_width; ++pw) {
        int64_t wstart = pw * stride_w - pads[1];
        int64_t wend = wstart + kernel_shape[1] * dilation_w;
        const int64_t pool_index = ph * pooled_width + pw;
        T Yh = std::numeric_limits<T>::lowest();
        int64_t h_index = -1;
        int64_t w_index = -1;
        for (int64_t h = hstart; h < hend; h += dilation_h) {
          // skip rows falling into the padding
          if (math::is_a_ge_zero_and_a_lt_b(h, height)) {
            for (int64_t w = wstart; w < wend; w += dilation_w) {
              if (math::is_a_ge_zero_and_a_lt_b(w, width)) {
                const int64_t input_index = h * width + w;
                if (x_d[input_index] > Yh) {
                  Yh = x_d[input_index];
                  h_index = h;
                  w_index = w;
                }
              }
            }
          }
        }
        y_d[pool_index] = Yh;
        if (i_d != nullptr)
          i_d[pool_index] =
              storage_order == 0 ? c * x_step + h_index * width + w_index : c * x_step + h_index + w_index * height;
      }
    }
  }
};
// 3-D max pooling with dilation support; same scheme as MaxPool2DTask
// extended with a depth axis. Optionally records argmax indices into
// I_data, flattened row-major (storage_order == 0) or column-major.
// NOTE(review): an all-padding window leaves the index vars at -1 and the
// stored index negative — presumably callers prevent this; confirm.
template <typename T>
struct MaxPool3DTask {
  const T* X_data;
  T* Y_data;
  int64_t* I_data;        // optional argmax output; may be nullptr
  int64_t x_step;
  int64_t y_step;
  int64_t dilation_h;
  int64_t dilation_w;
  int64_t dilation_d;
  int64_t pooled_height;
  int64_t pooled_width;
  int64_t pooled_depth;
  int64_t stride_h;
  int64_t stride_w;
  int64_t stride_d;
  int64_t height;
  int64_t width;
  int64_t depth;
  const std::vector<int64_t>& kernel_shape;
  const std::vector<int64_t>& pads;
  int64_t storage_order;  // 0 = row major, otherwise column major
  // Pool slices [begin, end); slices are independent of each other.
  void operator()(std::ptrdiff_t begin, std::ptrdiff_t end) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int64_t c = begin; c < end; ++c) {
      operator()(c);
    }
  }
  // Per-slice cost estimate handed to the thread-pool scheduler.
  TensorOpCost Cost() {
    double loop_count = static_cast<double>(pooled_height * pooled_width * pooled_depth * kernel_shape[0] *
                                            kernel_shape[1] * kernel_shape[2]);
    return TensorOpCost{loop_count, loop_count, loop_count};
  }
  // Pool a single slice c.
  void operator()(std::ptrdiff_t c) const {
    const T* x_d = X_data + c * x_step;
    T* y_d = Y_data + c * y_step;
    int64_t* i_d = I_data ? I_data + c * y_step : nullptr;
    for (int64_t ph = 0; ph < pooled_height; ++ph) {
      int64_t hstart = ph * stride_h - pads[0];
      int64_t hend = hstart + kernel_shape[0] * dilation_h;
      for (int64_t pw = 0; pw < pooled_width; ++pw) {
        int64_t wstart = pw * stride_w - pads[1];
        int64_t wend = wstart + kernel_shape[1] * dilation_w;
        for (int64_t pd = 0; pd < pooled_depth; ++pd) {
          int64_t dstart = pd * stride_d - pads[2];
          int64_t dend = dstart + kernel_shape[2] * dilation_d;
          const int64_t pool_index = ph * pooled_width * pooled_depth + pw * pooled_depth + pd;
          T Yh = std::numeric_limits<T>::lowest();
          int64_t h_index = -1;
          int64_t w_index = -1;
          int64_t d_index = -1;
          for (int64_t h = hstart; h < hend; h += dilation_h) {
            // skip positions falling into the padding on each axis
            if (math::is_a_ge_zero_and_a_lt_b(h, height)) {
              for (int64_t w = wstart; w < wend; w += dilation_w) {
                if (math::is_a_ge_zero_and_a_lt_b(w, width)) {
                  for (int64_t d = dstart; d < dend; d += dilation_d) {
                    if (math::is_a_ge_zero_and_a_lt_b(d, depth)) {
                      const int64_t input_index = h * width * depth + w * depth + d;
                      if (x_d[input_index] > Yh) {
                        Yh = x_d[input_index];
                        h_index = h;
                        w_index = w;
                        d_index = d;
                      }
                    }
                  }
                }
              }
            }
          }
          y_d[pool_index] = Yh;
          if (i_d != nullptr)
            i_d[pool_index] = storage_order == 0 ? c * x_step + h_index * width * depth + w_index * depth + d_index :
                                                   c * x_step + h_index + w_index * height + d_index * height * width;
        }
      }
    }
  }
};
} // namespace onnxruntime
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute RESULT = X - Y for two `struct timeval` values.
 * Note: Y is normalized (modified) in the process, matching the classic
 * GNU libc elapsed-time example this is adapted from.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that 0 <= x->tv_usec - y->tv_usec < 1000000. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x predates (the normalized) y. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) 3-D wave stencil: allocates the two
 * time levels A[0]/A[1] and the coefficient grid roc2, runs the stencil
 * TESTS times over Nt timesteps, reports per-test wall-clock times, then
 * frees everything.
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt now have defaults, so running with too few arguments no
 *    longer uses uninitialized sizes (undefined behavior);
 *  - the throwaway 1-element roc2 allocation (immediately leaked by the
 *    reassignment) is removed;
 *  - initialization now covers index 0 (the stencil reads i-4 == 0 at
 *    i == 4) and also initializes A[1], which the RHS reads on the first
 *    timestep; both were previously uninitialized reads;
 *  - tile_size and the top-level A pointer array are freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults (16^3 interior, 2 timesteps) used when arguments are absent */
  Nx = Ny = Nz = 16 + 8;
  Nt = 2;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;   /* +8 adds the 4-deep halo on each side */
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[0]/A[1] are the two time levels; roc2 holds per-point coefficients */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  // initialize all grid points (the stencil reads halo indices down to 0,
  // and the update reads the previous value of the destination level)
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-4 central-difference coefficients */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i ][j ][k ] +
                coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                       A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                       A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                       A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                       A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                       A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                       A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                       A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                       A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);         /* was leaked */
  free(tile_size); /* was leaked */
  return 0;
}
|
c_qsort.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_qsort.c
VERSION: 1.0
DATE: May 2004
AUTHOR: F. de Sande
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: Parallel implementation of Quicksort using OpenMP
Sorts an integer array
COMMENTS: The code requires nested Parallelism.
REFERENCES: C. A. R. Hoare,
ACM Algorithm 64}: Quicksort",
Communications of the ACM",
vol. 4, no. 7, pg. 321. Jul 1961
http://en.wikipedia.org/wiki/Quicksort
BASIC PRAGMAS: parallel for
USAGE: ./c_qsort.par 2000000
INPUT: The size (in K) of the vector to sort
OUTPUT: The code tests that the vector is sorted
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
//#include "OmpSCR.h"
#include <omp.h>
#define NUM_ARGS 1
#define NUM_TIMERS 1
#define KILO (1024)
#define MEGA (1024 * 1024)
#define DEFAULT_SIZE (2 * MEGA)
#define MAXSIZE (9 * MEGA)
#define NUM_STEPS 10 /* No. of iterations (number of vectors to sort) */
#define SIZEINIT 128
char USAGE_STR[] = "<size_in_Kb>";
int SIZE;
int array[MAXSIZE];
/* -----------------------------------------------------------------------
PROTOTYPES
* ----------------------------------------------------------------------- */
void initialize(int *v, int seed);
void testit(int *v);
void qs(int *v, int first, int last);
/* -----------------------------------------------------------------------
IMPLEMENTATION
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
Sets randomly the values for the array
* ----------------------------------------------------------------------- */
/*
 * Fill the first SIZE entries of v with pseudo-random integers, seeding
 * the generator with `seed` so every run is reproducible.
 */
void initialize(int *v, int seed) {
  srandom(seed);
  for (unsigned idx = 0; idx < SIZE; idx++) {
    v[idx] = (int)random();
  }
}
/* -----------------------------------------------------------------------
Tests the result
* ----------------------------------------------------------------------- */
/*
 * Verify that the first SIZE entries of v are in non-decreasing order and
 * report the outcome on stdout.
 */
void testit(int *v) {
  int sorted = 1;
  for (int idx = 0; idx < SIZE - 1; idx++) {
    if (v[idx] > v[idx + 1]) {
      sorted = 0;
      break;
    }
  }
  if (sorted)
    printf("Array sorted.\n");
  else
    printf("Array NOT sorted.\n");
}
/* ----------------------------------------------------------------------- */
/*
 * In-place quicksort of v[first..last] (inclusive): Hoare partition with
 * the middle element as pivot, then the two partitions are sorted by a
 * two-iteration OpenMP worksharing loop, recursively (requires nested
 * parallelism to actually run both sides concurrently).
 */
void qs(int *v, int first, int last) {
  if (first >= last)
    return;

  int lo = first;
  int hi = last;
  int pivot = v[(first + last) / 2];

  /* Hoare partition: afterwards [first..hi] <= pivot <= [lo..last]. */
  while (lo <= hi) {
    while (v[lo] < pivot)
      lo++;
    while (pivot < v[hi])
      hi--;
    if (lo <= hi) {
      int tmp = v[lo];
      v[lo] = v[hi];
      v[hi] = tmp;
      lo++;
      hi--;
    }
  }

  int start[2], end[2];
  start[0] = first; end[0] = hi;
  start[1] = lo;    end[1] = last;

#pragma omp parallel
  {
#pragma omp for nowait
    for (int side = 0; side <= 1; side++) {
      qs(v, start[side], end[side]);
    }
  }
}
/* ----------------------------------------------------------------------- */
/*
 * Driver: sorts NUM_STEPS pseudo-random vectors of SIZE ints with qs()
 * and verifies each result.  The OmpSCR measurement harness is stubbed
 * out; timing output is therefore a placeholder value of 1.
 */
int main(int argc, char *argv[]) {
  int STEP, NUMTHREADS;
  double total_time;
  char *PARAM_NAMES[NUM_ARGS] = {"Size (in K)"};
  char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" };
  char *DEFAULT_VALUES[NUM_ARGS] = {"2048 K"};
  NUMTHREADS = 1; //omp_get_num_threads();
  //OSCR_init (NUMTHREADS, "Quicksort", "Use 'qsort' <size (in K)>", NUM_ARGS,
  //  PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
  //  argc, argv);
  SIZE = SIZEINIT; //OSCR_getarg_int(1);
  if (SIZE > MAXSIZE) {
    printf("Size: %d Maximum size: %d\n", SIZE, MAXSIZE);
    exit(-1);
  }
  /* Default: DEFAULT_SIZE */
  for (STEP = 0; STEP < NUM_STEPS; STEP++) {
    initialize(array, STEP);
    //OSCR_timer_start(0);
    qs(array, 0, SIZE-1);
    /* BUG FIX: OSCR_timer_stop(0) was the only harness call left active,
     * causing an undefined reference; stub it out like its siblings. */
    //OSCR_timer_stop(0);
    testit(array);
  }
  total_time = 1; //OSCR_timer_read(0);
  //OSCR_report(1, TIMERS_NAMES);
  printf("\n \t# THREADS \tSIZE \tSTEPS \tTIME (secs.) \n");
  printf("\t%d \t\t%d \t%d \t%14.6lf \n", NUMTHREADS, SIZE, NUM_STEPS, total_time);
  return 0;
} /* main */
/*
* vim:ts=2:sw=2:
*/
|
FullyDistSpVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_SP_VEC_H_
#define _FULLY_DIST_SP_VEC_H_
#include <iostream>
#include <vector>
#include <utility>
#include "CommGrid.h"
#include "promote.h"
#include "SpParMat.h"
#include "FullyDist.h"
#include "Exception.h"
#include "OptBuf.h"
#include "CombBLAS.h"
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class FullyDistVec;
template <class IU, class NU>
class SparseVectorLocalIterator;
/**
* A sparse vector of length n (with nnz <= n of them being nonzeros) is distributed to
* "all the processors" in a way that "respects ordering" of the nonzero indices
* Example: x = [5,1,6,2,9] for nnz(x)=5 and length(x)=12
* we use 4 processors P_00, P_01, P_10, P_11
* Then P_00 owns [1,2] (in the range [0,...,2]), P_01 owns [5] (in the range [3,...,5]), and so on.
* In the case of A(v,w) type sparse matrix indexing, this doesn't matter because n = nnz
* After all, A(v,w) will have dimensions length(v) x length (w)
* v and w will be of numerical type (NT) "int" and their indices (IT) will be consecutive integers
* It is possible that nonzero counts are distributed unevenly
* Example: x=[1,2,3,4,5] and length(x) = 20, then P_00 would own all the nonzeros and the rest will hold empty vectors
* Just like in SpParMat case, indices are local to processors (they belong to range [0,...,length-1] on each processor)
* \warning Always create vectors with the right length, setting elements won't increase its length (similar to operator[] on std::vector)
**/
template <class IT, class NT>
class FullyDistSpVec: public FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>
{
public:
FullyDistSpVec ( );
explicit FullyDistSpVec ( IT glen );
FullyDistSpVec ( std::shared_ptr<CommGrid> grid);
FullyDistSpVec ( std::shared_ptr<CommGrid> grid, IT glen);
template <typename _UnaryOperation>
FullyDistSpVec (const FullyDistVec<IT,NT> & rhs, _UnaryOperation unop);
FullyDistSpVec (const FullyDistVec<IT,NT> & rhs); // Conversion copy-constructor
FullyDistSpVec (IT globalsize, const FullyDistVec<IT,IT> & inds, const FullyDistVec<IT,NT> & vals, bool SumDuplicates = false);
FullyDistSpVec (std::shared_ptr<CommGrid> grid, IT globallen, const std::vector<IT>& indvec, const std::vector<NT> & numvec, bool SumDuplicates = false, bool sorted=false);
IT NnzUntil() const;
FullyDistSpVec<IT,NT> Invert (IT globallen);
template <typename _BinaryOperationIdx, typename _BinaryOperationVal, typename _BinaryOperationDuplicate>
FullyDistSpVec<IT,NT> Invert (IT globallen, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, _BinaryOperationDuplicate __binopDuplicate);
template <typename _BinaryOperationIdx, typename _BinaryOperationVal>
FullyDistSpVec<IT,NT> InvertRMA (IT globallen, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal);
template <typename NT1, typename _UnaryOperation>
void Select (const FullyDistVec<IT,NT1> & denseVec, _UnaryOperation unop);
template <typename _UnaryOperation>
void FilterByVal (FullyDistSpVec<IT,IT> Selector, _UnaryOperation __unop, bool filterByIndex);
template <typename NT1>
void Setminus (const FullyDistSpVec<IT,NT1> & other);
//template <typename NT1, typename _UnaryOperation>
//void Set (FullyDistSpVec<IT,NT1> Selector, _UnaryOperation __unop);
template <typename NT1, typename _UnaryOperation, typename _BinaryOperation>
void SelectApply (const FullyDistVec<IT,NT1> & denseVec, _UnaryOperation __unop, _BinaryOperation __binop);
//! like operator=, but instead of making a deep copy it just steals the contents.
//! Useful for places where the "victim" will be destroyed immediately after the call.
void stealFrom(FullyDistSpVec<IT,NT> & victim);
FullyDistSpVec<IT,NT> & operator=(const FullyDistSpVec< IT,NT > & rhs);
FullyDistSpVec<IT,NT> & operator=(const FullyDistVec< IT,NT > & rhs); // convert from dense
//! Overwrite the numerical value of every existing nonzero with fixedval.
//! The sparsity pattern (ind) is left untouched; entries absent from the
//! vector stay absent.  Local-only operation, no communication.
FullyDistSpVec<IT,NT> & operator=(NT fixedval) // assign fixed value
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(size_t i=0; i < ind.size(); ++i)
num[i] = fixedval;
return *this;
}
FullyDistSpVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistSpVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
//! Default element I/O policy used by ReadDistribute/SaveGathered/ParallelWrite:
//! reads and writes plain scalars with operator>> / operator<<.
class ScalarReadSaveHandler
{
public:
// Value to use when the input file provides an index but no number.
NT getNoNum(IT index) { return static_cast<NT>(1); }
// Read one value for position `index` from an input stream.
template <typename c, typename t>
NT read(std::basic_istream<c,t>& is, IT index)
{
NT v;
is >> v;
return v;
}
// Write one value for position `index` to an output stream.
template <typename c, typename t>
void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
{
os << v;
}
};
template <class HANDLER>
void ParallelWrite(const std::string & filename, bool onebased, HANDLER handler, bool includeindices = true, bool includeheader = false);
void ParallelWrite(const std::string & filename, bool onebased, bool includeindices = true) { ParallelWrite(filename, onebased, ScalarReadSaveHandler(), includeindices); };
template <typename _BinaryOperation>
void ParallelRead (const std::string & filename, bool onebased, _BinaryOperation BinOp);
//! Totally obsolete version that only accepts an ifstream object and ascii files
template <class HANDLER>
std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler);
std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
template <class HANDLER>
void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler()); }
//! Convert to a sparse vector with a different value type NNT.
//! Copies the index and (element-wise converted) value arrays and the
//! global length; shares the same communication grid.
template <typename NNT> operator FullyDistSpVec< IT,NNT > () const //!< Type conversion operator
{
FullyDistSpVec<IT,NNT> CVT(commGrid);
CVT.ind = std::vector<IT>(ind.begin(), ind.end());
CVT.num = std::vector<NNT>(num.begin(), num.end());
CVT.glen = glen;
return CVT;
}
//! Equality test implemented by densifying both operands and comparing the
//! dense vectors.  NOTE(review): presumably a collective operation (the dense
//! comparison spans the whole grid) — confirm against FullyDistVec::operator==.
bool operator==(const FullyDistSpVec<IT,NT> & rhs) const
{
FullyDistVec<IT,NT> v = *this;
FullyDistVec<IT,NT> w = rhs;
return (v == w);
}
void PrintInfo(std::string vecname) const;
void iota(IT globalsize, NT first);
void nziota(NT first);
FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //!< SpRef (expects ri to be 0-based)
void SetElement (IT indx, NT numx); // element-wise assignment
void DelElement (IT indx); // element-wise deletion
NT operator[](IT indx);
bool WasFound() const { return wasFound; }
//! sort the vector itself, return the permutation vector (0-based)
FullyDistSpVec<IT, IT> sort();
#if __cplusplus > 199711L
template <typename _BinaryOperation = minimum<NT> >
FullyDistSpVec<IT, NT> Uniq(_BinaryOperation __binary_op = _BinaryOperation(), MPI_Op mympiop = MPI_MIN);
#else
template <typename _BinaryOperation >
FullyDistSpVec<IT, NT> Uniq(_BinaryOperation __binary_op, MPI_Op mympiop);
#endif
// Aydin TODO: parallelize with OpenMP
template <typename _UnaryOperation>
//! Prune any nonzero entries for which __unary_op evaluates to true
//! (decided solely on the value, not the index).
//! If inPlace, this vector is filtered in place and a blank vector is
//! returned to match the signature; otherwise the surviving entries are
//! returned and *this is unchanged.
//! NOTE(review): the non-inPlace result never has glen set from this->glen
//! — confirm before relying on the returned vector's length.
FullyDistSpVec<IT,NT> Prune(_UnaryOperation __unary_op, bool inPlace = true) //<! Prune any nonzero entries for which the __unary_op evaluates to true (solely based on value)
{
FullyDistSpVec<IT,NT> temp(commGrid);
IT spsize = ind.size();
for(IT i=0; i< spsize; ++i)
{
if(!(__unary_op(num[i]))) // keep this nonzero
{
temp.ind.push_back(ind[i]);
temp.num.push_back(num[i]);
}
}
if (inPlace)
{
ind.swap(temp.ind);
// BUG FIX: was ind.swap(temp.num), which discarded the surviving values
// and is ill-formed whenever IT != NT (vector<IT>::swap(vector<NT>&)).
num.swap(temp.num);
return FullyDistSpVec<IT,NT>(commGrid); // return blank to match signature
}
else
{
return temp;
}
}
//! Number of nonzeros stored on this processor (local, no communication).
IT getlocnnz() const
{
return ind.size();
}
//! Global number of nonzeros: sums the local counts with an MPI_Allreduce.
//! Collective over the communication grid's world communicator.
IT getnnz() const
{
IT totnnz = 0;
IT locnnz = ind.size();
MPI_Allreduce( &locnnz, &totnnz, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
return totnnz;
}
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyRowLength;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::RowLenUntil;
//! Set each nonzero's value to its own global index
//! (local index + this processor's length offset).  Local-only.
void setNumToInd()
{
IT offset = LengthUntil();
IT spsize = ind.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i< spsize; ++i)
num[i] = ind[i] + offset;
}
template <typename _Predicate>
IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true
template <typename _UnaryOperation>
//! Apply __unary_op to every stored value in place; indices are untouched.
//! Local-only operation, parallelized with OpenMP when available.
void Apply(_UnaryOperation __unary_op)
{
//transform(num.begin(), num.end(), num.begin(), __unary_op);
IT spsize = num.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < spsize; ++i)
num[i] = __unary_op(num[i]);
}
template <typename _BinaryOperation>
//! Replace every stored value with __binary_op(value, global_index),
//! where global_index = local index + this processor's length offset.
void ApplyInd(_BinaryOperation __binary_op)
{
IT offset = LengthUntil();
IT spsize = ind.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; i < spsize; ++i)
num[i] = __binary_op(num[i], ind[i] + offset);
}
template <typename _BinaryOperation>
NT Reduce(_BinaryOperation __binary_op, NT init) const;
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const;
void DebugPrint();
std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
void Reset();
NT GetLocalElement(IT indx);
void BulkSet(IT inds[], int count);
std::vector<IT> GetLocalInd (){std::vector<IT> rind = ind; return rind;};
std::vector<NT> GetLocalNum (){std::vector<NT> rnum = num; return rnum;};
template <typename _Predicate>
FullyDistVec<IT,IT> FindInds(_Predicate pred) const;
template <typename _Predicate>
FullyDistVec<IT,NT> FindVals(_Predicate pred) const;
protected:
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen;
using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid;
private:
std::vector< IT > ind; // ind.size() give the number of nonzeros
std::vector< NT > num;
bool wasFound; // true if the last GetElement operation returned an actual value
template <typename _BinaryOperation>
void SparseCommon(std::vector< std::vector < std::pair<IT,NT> > > & data, _BinaryOperation BinOp);
#if __cplusplus > 199711L
template <typename _BinaryOperation = minimum<NT> >
FullyDistSpVec<IT, NT> UniqAll2All(_BinaryOperation __binary_op = _BinaryOperation(), MPI_Op mympiop = MPI_MIN);
#else
template <typename _BinaryOperation >
FullyDistSpVec<IT, NT> UniqAll2All(_BinaryOperation __binary_op, MPI_Op mympiop);
#endif
template <class IU, class NU>
friend class FullyDistSpVec;
template <class IU, class NU>
friend class FullyDistVec;
template <class IU, class NU, class UDER>
friend class SpParMat;
template <class IU, class NU>
friend class SparseVectorLocalIterator;
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
friend FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x );
template <typename SR, typename IU, typename NUM, typename UDER>
friend FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue);
template <typename VT, typename IU, typename UDER> // NoSR version (in BFSFriends.h)
friend FullyDistSpVec<IU,VT> SpMV (const SpParMat<IU,bool,UDER> & A, const FullyDistSpVec<IU,VT> & x, OptBuf<int32_t, VT > & optbuf);
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf);
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA);
template <typename IU, typename NU1, typename NU2>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp);
template <typename IU>
friend void RandPerm(FullyDistSpVec<IU,IU> & V); // called on an existing object, randomly permutes it
template <typename IU>
friend void RenameVertices(DistEdgeList<IU> & DEL);
//! Helper functions for sparse matrix X sparse vector
// Ariful: I made this an internal function in ParFriends.h
//template <typename SR, typename IU, typename OVT>
//friend void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs);
template <typename IU, typename VT>
friend void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs);
template<typename IU, typename NV>
friend void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue);
template <class IU, class NU, class DER, typename _UnaryOperation>
friend SpParMat<IU, bool, DER> PermMat1 (const FullyDistSpVec<IU,NU> & ri, const IU ncol, _UnaryOperation __unop);
};
}
#include "FullyDistSpVec.cpp"
#endif
|
test.c | #include <stdio.h>
#include "../utilities/check.h"
#define N 100
/* Offloading regression test: runs a SIMD loop with a linear(l: 2) clause
 * inside a target region and compares the device result (a[]) against the
 * same computation done on the host (aa[]).  Returns the error count. */
int main()
{
check_offloading();
int a[N], aa[N];
int i, error = 0;
// initialize both arrays to a sentinel so untouched slots are detected
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload: each iteration writes a[k] = 2*k via the linear variable l
#pragma omp target map(tofrom: a[0:100])
{
int k, l;
#pragma omp simd linear(l: 2)
for(k=0; k<N; k++) {
l = 2*k;
a[k] = l;
}
}
// host reference computation
for(i=0; i<N; i++)
aa[i] = 2*i;
// check: report each mismatch, bail out after 10 to bound the output
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
if (error > 10) {
printf("abort\n");
return 0;
}
}
// report
printf("done with %d errors\n", error);
return error;
}
|
PGF.c | /*
* ======================= PGF ====================
 * Apply one time step of the pressure-gradient-force, buoyancy, and
 * pressure (acoustic) updates to the velocity and pressure fields.
 * ATMS 502 / CSE 566, Spring 2016
 *
 * Arguments:
 *
 *	theta_d	real array	temperature perturbation (buoyancy term)
 *	p	real array	pressure field, updated in place
 *	u,v,w	real arrays	velocity components, updated in place
 *	rho	real array	density profile, indexed by k
 *	dx,dy,dz	reals	grid spacings
 *	tstep	real	time step
 *	i1,i2,j1,j2,k1,k2	integers	indices bounding array data
 *	nx,ny,nz	integers	numbers of grid points
 *	BC_WIDTH	integer	width of the boundary-condition ghost zone
*/
#include <stdio.h>
#include <stdlib.h>
/* PGF: update u, v, w from pressure gradients (plus buoyancy for w),
 * apply boundary conditions, then update p from the velocity divergence.
 * K&R-style definition; parameter arrays are VLAs sized by the int params
 * declared on the preceding line. */
void PGF(theta_d,p,u,v,w,rho,dx,dy,dz,tstep,i1,i2,j1,j2,k1,k2,nx,ny,nz,BC_WIDTH)
int i1,i2,j1,j2,k1,k2,nx,ny,nz,BC_WIDTH;
float rho[],p[][ny][nz],theta_d[][ny][nz],u[][ny][nz],v[][j2+2][nz],w[][ny][k2+2],dx,dy,dz,tstep;
{
int i,j,k;
float theta_0 = 300.0;  /* base-state potential temperature */
float g = 9.81;         /* gravitational acceleration */
float Cs = 60;          /* sound speed used in the pressure equation */
/* u momentum: du/dt = -(1/rho) dp/dx, staggered so u[i] sits between p[i-1] and p[i] */
#pragma omp parallel for shared(u,p) private(i,j,k)
for (i=i1+1;i<=i2;i++) /* u */
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
u[i][j][k] = u[i][j][k] - tstep/rho[k]*(p[i][j][k]-p[i-1][j][k])/dx;
}
/* v momentum: dv/dt = -(1/rho) dp/dy */
#pragma omp parallel for shared(v,p) private(i,j,k)
for (i=i1;i<=i2;i++) /* v */
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
v[i][j][k] = v[i][j][k] - tstep/rho[k]*(p[i][j][k]-p[i][j-1][k])/dy;
}
/* w momentum: vertical pressure gradient (density averaged between levels)
 * plus buoyancy from the temperature perturbation */
#pragma omp parallel for shared(w,p,theta_d) private(i,j,k)
for (i=i1;i<=i2;i++) /* w */
for (j=j1;j<=j2;j++)
for (k=k1+1;k<=k2;k++)
{
w[i][j][k] = w[i][j][k] - tstep*2.0/(rho[k-1]+rho[k])*(p[i][j][k]-p[i][j][k-1])/dz;
w[i][j][k] = w[i][j][k] + tstep*g*(theta_d[i][j][k]/theta_0+theta_d[i][j][k-1]/theta_0)/2;
}
/* refresh ghost zones before taking the divergence.
 * NOTE(review): bc() has no prototype in this file — confirm it is declared
 * in a shared header so the implicit declaration matches. */
bc(theta_d,p,u,v,w,i1,i2,j1,j2,k1,k2,nx,ny,nz,BC_WIDTH);
/* pressure: dp/dt = -Cs^2 * rho * div(velocity).
 * NOTE(review): reads rho[k+1] at k=k2 — assumes rho has a ghost level
 * above k2; verify the allocation in the caller. */
#pragma omp parallel for shared(p,u,v,w) private(i,j,k)
for (i=i1;i<=i2;i++)
for (j=j1;j<=j2;j++)
for (k=k1;k<=k2;k++)
{
p[i][j][k] = p[i][j][k] - tstep*Cs*Cs*rho[k]*(u[i+1][j][k]-u[i][j][k])/dx;
p[i][j][k] = p[i][j][k] - tstep*Cs*Cs*rho[k]*(v[i][j+1][k]-v[i][j][k])/dy;
p[i][j][k] = p[i][j][k] - tstep*Cs*Cs*((rho[k]+rho[k+1])/2*w[i][j][k+1]-(rho[k-1]+rho[k])/2*w[i][j][k])/dz;
}
return;
}
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Scalar reference path for 3x3 stride-1 int8 convolution.
// Accumulates int32 sums into top_blob (one output channel per OpenMP
// thread); top_blob must be pre-sized, bias/requantization happen elsewhere.
static void conv3x3s1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char* kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
// 9 int8 weights per (output, input) channel pair
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
const signed char* img0 = bottom_blob.channel(q);
// three consecutive input rows feed each output row
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
// 3x3 dot product, widened to int before accumulation
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
// skip the 2-pixel right border (input row is outw+2 wide)
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
// Winograd F(2,3) kernel transform: U = G * g * G^T for each (outch, inch)
// 3x3 int8 kernel, producing a 4x4 block of int16 coefficients.
// The G matrix below is the floating-point transform scaled by 2 so it
// stays integral; the output transform compensates with a >>2.
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(4 * 4, inch, outch, (size_t)2u);
// G
const short ktm[4][3] = {
{2, 0, 0},
{1, 1, 1},
{1, -1, 1},
{0, 0, 2}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h: tmp = G * g (4x3 intermediate)
short tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = tmp * G^T (4x4 result, row-major into kernel_tm0)
for (int j = 0; j < 4; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(2,3) 3x3 stride-1 int8 convolution.
// Pipeline: pad input to even tiles -> input transform (B^T d B per 4x4
// tile, int16) -> element-wise dot with the transformed kernels (int32)
// -> output transform (A^T w A per tile, with >>2 to undo the scaled G)
// -> crop the padding off.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4 * 4, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// four input rows covering one row of 4x4 tiles (tiles overlap by 2)
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[4], d1[4], d2[4], d3[4];
short w0[4], w1[4], w2[4], w3[4];
short t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 4] = d1[n];
out_tm0[n + 8] = d2[n];
out_tm0[n + 12] = d3[n];
}
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
// release the padded copy early; only the transformed tiles are needed now
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// process output channels in groups of 4, remainder handled below
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p + 1);
Mat out2_tm = top_blob_tm.channel(p + 2);
Mat out3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p + 1);
const Mat kernel2_tm = kernel_tm.channel(p + 2);
const Mat kernel3_tm = kernel_tm.channel(p + 3);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int* output1_tm = out1_tm.row<int>(i);
int* output2_tm = out2_tm.row<int>(i);
int* output3_tm = out3_tm.row<int>(i);
int sum0[16] = {0};
int sum1[16] = {0};
int sum2[16] = {0};
int sum3[16] = {0};
// unroll input channels by 4; kernel rows are 16 shorts apart,
// hence the += 16 / -= 16*3 pointer dance per accumulation
int q = 0;
for (; q + 3 < inch; q += 4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
k0 += 16;
sum0[n] += (int)r1[n] * k0[n];
k0 += 16;
sum0[n] += (int)r2[n] * k0[n];
k0 += 16;
sum0[n] += (int)r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += (int)r0[n] * k1[n];
k1 += 16;
sum1[n] += (int)r1[n] * k1[n];
k1 += 16;
sum1[n] += (int)r2[n] * k1[n];
k1 += 16;
sum1[n] += (int)r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += (int)r0[n] * k2[n];
k2 += 16;
sum2[n] += (int)r1[n] * k2[n];
k2 += 16;
sum2[n] += (int)r2[n] * k2[n];
k2 += 16;
sum2[n] += (int)r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += (int)r0[n] * k3[n];
k3 += 16;
sum3[n] += (int)r1[n] * k3[n];
k3 += 16;
sum3[n] += (int)r2[n] * k3[n];
k3 += 16;
sum3[n] += (int)r3[n] * k3[n];
k3 -= 16 * 3;
}
}
// leftover input channels (inch not a multiple of 4)
for (; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum1[n] += (int)r0[n] * k1[n];
sum2[n] += (int)r0[n] * k2[n];
sum3[n] += (int)r0[n] * k3[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
}
}
// remaining output channels, one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[16] = {0};
int q = 0;
for (; q + 3 < inch; q += 4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q + 1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q + 2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q + 3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel0_tm.row<short>(q + 1);
const short* k2 = kernel0_tm.row<short>(q + 2);
const short* k3 = kernel0_tm.row<short>(q + 3);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum0[n] += (int)r1[n] * k1[n];
sum0[n] += (int)r2[n] * k2[n];
sum0[n] += (int)r3[n] * k3[n];
}
}
for (; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n = 0; n < 16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n = 0; n < 16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j = 0; j < nColBlocks; j++)
{
// each 4x4 tile produces a 2x2 output patch
int* outRow0 = out.row<int>(j * 2);
int* outRow1 = out.row<int>(j * 2 + 1);
for (int i = 0; i < nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j * nRowBlocks + i);
int s0[4], s1[4], s2[4], s3[4];
int w0[4], w1[4];
int d0[2], d1[2], d2[2], d3[2];
int o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// shift right by 2: the transform kernel G was scaled by 2,
// so each output carries a factor of 4
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) kernel transform: U = G * g * G^T for each (outch, inch)
// 3x3 int8 kernel, producing a 6x6 block of int16 coefficients.
// ktm is the float G matrix scaled by 24 to stay integral; the output
// transform must compensate for the accumulated scale.
// NOTE(review): with |weight| up to 127 the second pass can reach
// ~3048*24 > SHRT_MAX, so extreme kernels may wrap in int16 — confirm the
// expected weight range upstream.
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
kernel_tm.create(6 * 6, inch, outch, (size_t)2u);
// G
// const float ktm[6][3] = {
// { 1.0f/4, 0.0f, 0.0f},
// { -1.0f/6, -1.0f/6, -1.0f/6},
// { -1.0f/6, 1.0f/6, -1.0f/6},
// { 1.0f/24, 1.0f/12, 1.0f/6},
// { 1.0f/24, -1.0f/12, 1.0f/6},
// { 0.0f, 0.0f, 1.0f}
// };
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 24}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h: tmp = G * g (6x3 intermediate; operands promote to int, result
// narrowed to short on store — same arithmetic as the F(2,3) version
// even though that one spells out an explicit (short) cast)
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = tmp * G^T (6x6 result, row-major into kernel_tm0)
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
// Winograd F(4,3) int8 3x3 stride-1 convolution:
//   1) pad the input so each output dim is a multiple of 4 (6x6 input tiles,
//      overlapping by 2, each produce a 4x4 output tile),
//   2) input transform  d' = B^T d B  (16-bit shorts),
//   3) per-tile elementwise product with the pre-transformed kernels,
//      accumulated in 32-bit ints over input channels,
//   4) output transform Y = A^T Y' A, divided by 576 because the kernel
//      transform matrix G was pre-scaled by 24 (applied twice: 24*24 = 576),
//   5) crop the bottom/right padding off the result.
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
// 36 shorts per tile (elemsize 2u = sizeof(short))
bottom_blob_tm.create(6 * 6, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
//     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
//     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
//     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
//     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
//     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
//     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// six consecutive input rows feeding this row of tiles (tiles step by 4)
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t   (second 1-D pass completes the 2-D transform B^T d B)
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
for (int n = 0; n < 6; n++)
{
out_tm0[n] = d0[n];
out_tm0[n + 6] = d1[n];
out_tm0[n + 12] = d2[n];
out_tm0[n + 18] = d3[n];
out_tm0[n + 24] = d4[n];
out_tm0[n + 30] = d5[n];
}
// next tile: 4 input columns to the right (6-wide tiles overlap by 2)
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
out_tm0 += 36;
}
}
}
}
// padded input copy no longer needed; release it
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
// 36 ints per tile: 32-bit accumulators (elemsize 4u = sizeof(int))
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i = 0; i < tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[36] = {0};
// elementwise multiply-accumulate over input channels, per frequency bin
for (int q = 0; q < inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n = 0; n < 36; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n = 0; n < 36; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
//     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
//     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
//     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j = 0; j < nColBlocks; j++)
{
// each tile row produces 4 output rows
int* outRow0 = out.row<int>(j * 4);
int* outRow1 = out.row<int>(j * 4 + 1);
int* outRow2 = out.row<int>(j * 4 + 2);
int* outRow3 = out.row<int>(j * 4 + 3);
for (int i = 0; i < nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j * nRowBlocks + i);
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
// 576 = 24*24: undo the x24 scaling the kernel transform applied twice
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    // Direct 3x3 stride-2 int8 convolution with int32 accumulation: each
    // output channel sums the 3x3 taps of every input channel into top_blob.
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after one row of outputs: skip the stride-2 remainder plus one full
    // input row (the vertical stride)
    const int tailstep = w - 2 * outw + w;

    const signed char* kernel = _kernel;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);
        out0.fill(0);

        // 9 weights per (outch, inch) pair
        const signed char* kernel0 = (const signed char*)kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;

            const signed char* img0 = bottom_blob.channel(q);
            const signed char* r0 = img0;
            const signed char* r1 = img0 + w;
            const signed char* r2 = img0 + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    // accumulate the 3x3 window for one output pixel
                    int sum0 = 0;
                    for (int t = 0; t < 3; t++)
                    {
                        sum0 += (int)r0[t] * kernel0[t];
                        sum0 += (int)r1[t] * kernel0[t + 3];
                        sum0 += (int)r2[t] * kernel0[t + 6];
                    }
                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }
}
|
buggy_version.c |
int array[1000];
// Each thread repeatedly updates a random slot of the shared array until its
// own stop condition fires; all array accesses are serialized by the
// critical section below.
#pragma omp parallel
{
// `flag` is declared inside the parallel region, so it is private to each
// thread: every thread loops until *it* observes the condition, independent
// of the other threads.
bool flag = true;
while(flag){
// NOTE(review): rand() is not guaranteed thread-safe and shares hidden state
// across threads -- consider rand_r() or a thread-local RNG. TODO confirm.
int x = rand()%1000;
#pragma omp critical
{
array[x] = some_function(array[x]);
if (some_condition(array[x])){
flag = false;
}
}
}
}
|
GB_subassign_07.c | //------------------------------------------------------------------------------
// GB_subassign_07: C(I,J)<M> += scalar ; no S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 07: C(I,J)<M> += scalar ; no S
// M: present
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: none
#include "GB_subassign_methods.h"
// C(I,J)<M> += scalar, with no S matrix (Method 07).  Phase 1 scans M and
// updates entries already present in C via the accum operator (zombies --
// entries marked deleted but still in the pattern -- are revived), while
// merely counting entries missing from C's pattern.  Phase 2 inserts those
// missing entries as pending tuples, to be assembled into C later.
GrB_Info GB_subassign_07
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const GrB_BinaryOp accum,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
// the GB_GET_* macros unpack fields of C, M, and the accum op into locals
GB_GET_C ;
// snapshot of C's zombie count (refreshed again before phase 2)
int64_t zorig = C->nzombies ;
const bool C_is_hyper = C->is_hyper ;
const int64_t *restrict Ch = C->h ;
const int64_t *restrict Cp = C->p ;
const int64_t Cnvec = C->nvec ;
const int64_t cvlen = C->vlen ;
GB_GET_MASK ;
GB_GET_ACCUM_SCALAR ;
//--------------------------------------------------------------------------
// Method 07: C(I,J)<M> += scalar ; no S
//--------------------------------------------------------------------------
// Time: Close to Optimal: same as Method 05.
// Method 05 and Method 07 are very similar.  Also compare with Method 06n.
//--------------------------------------------------------------------------
// Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07)
//--------------------------------------------------------------------------
GB_SUBASSIGN_ONE_SLICE (M) ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of M
//------------------------------------------------------------------
// Mh == NULL means M is non-hypersparse, so vector k is column k
int64_t j = (Mh == NULL) ? k : Mh [k] ;
GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k) ;
int64_t mjnz = pM_end - pM ;
if (mjnz == 0) continue ;
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
int64_t cjnz = pC_end - pC_start ;
bool cjdense = (cjnz == cvlen) ;
//------------------------------------------------------------------
// C(I,jC)<M(:,j)> += scalar ; no S
//------------------------------------------------------------------
if (cjdense)
{
//--------------------------------------------------------------
// C(:,jC) is dense so the binary search of C is not needed
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// consider the entry M(iA,j)
//----------------------------------------------------------
bool mij ;
cast_M (&mij, Mx +(pM*msize), 0) ;
//----------------------------------------------------------
// update C(iC,jC), but only if M(iA,j) allows it
//----------------------------------------------------------
if (mij)
{
int64_t iA = Mi [pM] ;
GB_iC_DENSE_LOOKUP ;
// ----[C A 1] or [X A 1]-------------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_withaccum_C_A_1_scalar ;
}
}
}
else
{
//--------------------------------------------------------------
// C(:,jC) is sparse; use binary search for C
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// consider the entry M(iA,j)
//----------------------------------------------------------
bool mij ;
cast_M (&mij, Mx +(pM*msize), 0) ;
//----------------------------------------------------------
// update C(iC,jC), but only if M(iA,j) allows it
//----------------------------------------------------------
if (mij)
{
int64_t iA = Mi [pM] ;
GB_iC_BINARY_SEARCH ;
if (cij_found)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_withaccum_C_A_1_scalar ;
}
else
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
// entry not in C's pattern: count it now, insert
// it as a pending tuple in phase 2
task_pending++ ;
}
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
// prefix-sum the per-task pending counts from phase 1, presumably so each
// task knows where to write its pending tuples
GB_PENDING_CUMSUM ;
zorig = C->nzombies ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of M
//------------------------------------------------------------------
int64_t j = (Mh == NULL) ? k : Mh [k] ;
GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k) ;
int64_t mjnz = pM_end - pM ;
if (mjnz == 0) continue ;
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
bool cjdense = ((pC_end - pC_start) == cvlen) ;
//------------------------------------------------------------------
// C(I,jC)<M(:,j)> += scalar ; no S
//------------------------------------------------------------------
// a dense C(:,jC) already holds every entry, so phase 1 handled it
if (!cjdense)
{
//--------------------------------------------------------------
// C(:,jC) is sparse; use binary search for C
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// consider the entry M(iA,j)
//----------------------------------------------------------
bool mij ;
cast_M (&mij, Mx +(pM*msize), 0) ;
//----------------------------------------------------------
// update C(iC,jC), but only if M(iA,j) allows it
//----------------------------------------------------------
if (mij)
{
int64_t iA = Mi [pM] ;
GB_iC_BINARY_SEARCH ;
if (!cij_found)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
GB_PENDING_INSERT (scalar) ;
}
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 *
 * Returns 1 if the difference is negative (x is earlier than y), 0 otherwise.
 * Assumes tv_usec of both inputs is normalized to [0, 1000000).
 *
 * Fix over the classic glibc-example version: the carry is handled in local
 * variables, so *y (and *x) are no longer modified as a side effect.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    long sec = (long)(x->tv_sec - y->tv_sec);
    long usec = (long)(x->tv_usec - y->tv_usec);

    /* Borrow one second when the microsecond part underflows. */
    if (usec < 0)
    {
        usec += 1000000;
        sec -= 1;
    }

    result->tv_sec = sec;
    result->tv_usec = usec;

    /* After normalization, a negative difference shows up in the seconds. */
    return sec < 0;
}
/* Order-2 (in time), 3D 25-point stencil benchmark driver: allocates the
 * grids, runs the tiled PLUTO/CLooG-generated wave kernel TESTS times, and
 * reports the best wall-clock time.
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt were used uninitialized when fewer than 4 arguments were
 *    given (undefined behavior); arguments are now required.
 *  - roc2 was allocated twice (the first one-element allocation leaked).
 *  - A[1] and the index-0 halo planes were read by the stencil without ever
 *    being initialized; all grids are now zero-filled first (the random
 *    initialization and its rand() stream are unchanged).
 *  - A and tile_size themselves are now freed.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;

    if (argc <= 4)
    {
        fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return 1;
    }
    Nx = atoi(argv[1]) + 8; /* +8: four halo layers per side for the order-4 stencil */
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);

    /* A holds the two time levels (ping-pong buffers); roc2 is the
     * space-dependent coefficient field. */
    double ****A = (double ****) malloc(sizeof(double***) * 2);
    A[0] = (double ***) malloc(sizeof(double**) * Nz);
    A[1] = (double ***) malloc(sizeof(double**) * Nz);
    double ***roc2 = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double**) malloc(sizeof(double*) * Ny);
        A[1][i] = (double**) malloc(sizeof(double*) * Ny);
        roc2[i] = (double**) malloc(sizeof(double*) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
            A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
            roc2[i][j] = (double*) malloc(sizeof(double) * Nx);
        }
    }

    /* tile size information, including extra element to decide the list length */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    /* The list is modified here before source-to-source transformations */
    tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 8;
    tile_size[3] = 32;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    /* Zero-fill first so the index-0 halo planes and the A[1] time level
     * (both read by the stencil, neither touched by the random init below)
     * hold defined values. */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 0.0;
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 0.0;
            }
        }
    }

    /* reproducible random initialization (same rand() stream as before) */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* order-4 central-difference Laplacian coefficients */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=floord(Nt-1,3);t1++) {
                lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
                ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) {
                        for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(8*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(8*t3+Nx-5,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) {
                            for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),8*t4+6);t5++) {
                                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                                        lbv=max(32*t4,4*t5+4);
                                        ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays (including A and tile_size themselves) */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    free(roc2);
    free(tile_size);
    return 0;
}
|
GB_binop__bget_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bget_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int8)
// C=scalar+B GB (_bind1st__bget_int8)
// C=scalar+B' GB (_bind1st_tran__bget_int8)
// C=A+scalar GB (_bind2nd__bget_int8)
// C=A'+scalar GB (_bind2nd_tran__bget_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITGET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT8 || GxB_NO_BGET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop itself lives in the
// shared template, specialized here for the BGET int8 operator.
GrB_Info GB (_Cdense_ewise3_noaccum__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_DISABLE)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the caller's
// precomputed slicing of B's entries across B_ntasks tasks / B_nthreads
// threads; the loop lives in the shared template, specialized for BGET int8.
GrB_Info GB (_Cdense_accumB__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix (BGET int8 operator).
// The scalar is passed type-erased in p_bwork; the loop lives in the shared
// template.  Fix: the generated code contained two consecutive
// `return (GrB_SUCCESS)` statements -- the second was unreachable dead code
// and has been removed (single exit after the template block).
GrB_Info GB (_Cdense_accumb__bget_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled by the code generator: no C=D*B row-scale kernel is emitted for
// this operator/type, so this stub is compiled out and never referenced.
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M) with the bget/int8 operator.
// The C_to_M/C_to_A/C_to_B maps and TaskList come from the caller's task
// scheduler; GB_add_template.c consumes them. Workspaces declared with
// GB_WERK_DECLARE are released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__bget_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked), method 01: both-sparse case,
// dispatched entirely through GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__bget_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hypersparse, B bitmap/full. When the operator
// is not commutative and has no pre-flipped variant (GB_BINOP_FLIP), the
// flipxy flag selects fmult(y,x) vs fmult(x,y) via the GB_FLIPPED macro.
GrB_Info GB (_AemultB_02__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: M sparse/hypersparse, A and B bitmap/full; the mask
// is iterated (M_ek_slicing) and GB_emult_03_template.c does the work.
GrB_Info GB (_AemultB_03__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is bitmap (C=A.*B, C<M>=A.*B, or C<!M>=A.*B);
// GB_bitmap_emult_template.c handles all three mask variants.
GrB_Info GB (_AemultB_bitmap__bget_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the bitget operator with the scalar bound to the first argument:
// Cx [p] = GB_BITGET (x, Bx [p]) for every entry present in B. Bb is B's
// bitmap (NULL when B is not bitmap); entries with Bb[p]==0 are skipped.
GrB_Info GB (_bind1st__bget_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, int8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the bitget operator with the scalar bound to the second argument:
// Cx [p] = GB_BITGET (Ax [p], y) for every entry present in A. Ab is A's
// bitmap (NULL when A is not bitmap); entries with Ab[p]==0 are skipped.
GrB_Info GB (_bind2nd__bget_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, int8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = bitget(x, aij);
// comments cannot go inside the macro body (backslash continuations).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int8_t, 8) ; \
}
// C = op (x, A'): transpose A while applying bitget with x bound first.
GrB_Info GB (_bind1st_tran__bget_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP so the transpose template computes cij = bitget(aij, y);
// comments cannot go inside the macro body (backslash continuations).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int8_t, 8) ; \
}
// C = op (A', y): transpose A while applying bitget with y bound second.
GrB_Info GB (_bind2nd_tran__bget_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
6297.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  /* Fill the four 3mm input matrices with the standard PolyBench
     deterministic pattern (values depend only on the indices and sizes). */
  int row, col;
  for (row = 0; row < ni; row++) {
    for (col = 0; col < nk; col++) {
      A[row][col] = ((DATA_TYPE) row*col) / ni;
    }
  }
  for (row = 0; row < nk; row++) {
    for (col = 0; col < nj; col++) {
      B[row][col] = ((DATA_TYPE) row*(col+1)) / nj;
    }
  }
  for (row = 0; row < nj; row++) {
    for (col = 0; col < nm; col++) {
      C[row][col] = ((DATA_TYPE) row*(col+3)) / nl;
    }
  }
  for (row = 0; row < nm; row++) {
    for (col = 0; col < nl; col++) {
      D[row][col] = ((DATA_TYPE) row*(col+2)) / nk;
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
/* Dump the live-out matrix G to stderr so dead-code elimination cannot
remove the computation; also usable to check output correctness. */
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
/* break the dump into lines every 20 values (PolyBench convention) */
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
/* Three chained matrix products: G = (A*B) * (C*D), via E = A*B and
F = C*D. The whole function (including call/return) is timed by main. */
int i, j, k;
#pragma scop
/* NOTE(review): "#P11" is an autotuning template placeholder, not valid C —
this file will not compile until it is substituted with a thread count. */
#pragma omp parallel private (i, j, k) num_threads(#P11)
{
/* E := A*B */
/* NOTE(review): a "parallel for" inside the enclosing "parallel" region
creates nested parallelism, and the "target teams distribute" on the
j-loop relies on implicit data mapping — confirm this combination is
what the autotuner intended. */
#pragma omp parallel for simd
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute thread_limit(64)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
#pragma omp parallel for simd
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp target teams distribute thread_limit(64)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F (depends on both loops above having completed) */
#pragma omp parallel for simd
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute thread_limit(64)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Standard PolyBench driver: allocate, initialize, time the kernel,
print the live-out array (to defeat DCE), and free everything. */
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). E, F, G are outputs and are not initialized. */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
GB_unop__log_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log_fc32_fc32)
// op(A') function: GB (_unop_tran__log_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = clogf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = clogf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = clogf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = clogf (Ax [p]) (single-precision complex log) over anz
// entries. The bitmap case skips entries with Ab [p] == 0; the caller has
// already copied A->b into C->b, so only values need to be written here.
GrB_Info GB (_unop_apply__log_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense case: every position holds a value
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = clogf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = clogf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = clogf (A'): transpose A and apply the operator, via the shared
// GB_unop_transpose.c template specialized by this file's GB_* macros.
GrB_Info GB (_unop_tran__log_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
first.c | int main()
{
const int N = 10;
int i;
#pragma omp parallel for
for(i = 0; i < N; i++)
{
printf("I am counter %d\n", i);
}
}
|
functions.c | f64 gaussian(f64 x, f64 mu, f64 sigma) {
    /* Density of the normal distribution N(mu, sigma^2) evaluated at x. */
    f64 coeff = 1.0 / (sigma * sqrt(2.0 * M_PI));
    f64 diff = x - mu;
    return coeff * exp(-diff * diff / (2 * sigma * sigma));
}
void f64_normalize(f64* out, f64* data, u32 size) {
/* L2-normalize data into out: out[i] = data[i] / ||data||_2.
out and data may alias element-wise (each slot is read then written). */
f64 sum = 0.0;
/* accumulate the squared magnitudes in parallel via an OpenMP reduction */
#pragma omp parallel for shared(data) reduction(+: sum)
for (u32 i = 0; i < size; ++i) {
f64 absval = fabs(data[i]); /* fabs before squaring; square is the same either way */
sum += absval*absval;
}
f64 scaling = 1.0/sqrt(sum);
/* NOTE(review): if sum == 0 (all-zero input or size == 0), scaling is inf
and out is filled with inf/nan — confirm callers never pass such input. */
#pragma omp parallel for
for (u32 i = 0; i < size; ++i) {
out[i] = data[i] * scaling;
}
}
static inline f128 factorial_128(const u32 n) {
    /* n! in extended precision: multiply n, n-1, ..., 1 (returns 1.0 for n == 0). */
    f128 result = 1.0;
    for (f128 term = (f128) n; term > 0.0; term -= 1.0) {
        result *= term;
    }
    return result;
}
static inline f128 double_factorial_positive_128(const u32 n) {
    /* n!! = n * (n-2) * (n-4) * ... down to 1 (odd n) or 2 (even n);
       returns 1.0 for n == 0. */
    f128 result = 1.0;
    for (f128 term = (f128) n; term > 0.0; term -= 2.0) {
        result *= term;
    }
    return result;
}
static inline f128 double_factorial_128(const i32 n) {
/* Double factorial extended to negative odd n via n!! = (n+2)!!/(n+2),
e.g. (-1)!! = 1, (-3)!! = -1, (-5)!! = 1/3, (-7)!! = -1/15. */
if (n < 0) {
/* only negative *odd* arguments are defined for this extension */
assert(n % 2 != 0);
f128 n_double_fact = double_factorial_positive_128((u32)(-n));
/* sign alternates with (n-1)/2; relies on C's truncation toward zero
for negative integer division and remainder */
f128 sign = (((n-1)/2) % 2 == 0) ? 1.0 : -1.0;
return sign*n/n_double_fact;
}
return double_factorial_positive_128((u32)n);
}
static inline u64 n_choose_k(const u32 n, const u32 k) {
    /* Binomial coefficient C(n,k) = n! / (k! (n-k)!), computed in extended
       precision and rounded to the nearest integer. Requires k <= n. */
    f128 numerator = factorial_128(n);
    f128 k_term = factorial_128(k);
    f128 nk_term = factorial_128(n-k);
    f128 result = (numerator/nk_term) * 1.0/k_term;
    return lroundl(result);
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/token-private.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
Constant declaration.
*/
/* Default colors, tile geometries, progress-tag strings, and page/density
geometry strings used throughout MagickCore. */
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
/* default image resolution, dots per inch */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
/* sRGB primaries and D65 white point */
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
image->blur=1.0;
InitializeExceptionInfo(&image->exception);
/* resolve the default colors; failures accumulate in image->exception */
(void) QueryColorDatabase(BackgroundColor,&image->background_color,
&image->exception);
(void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
(void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
(void) QueryColorDatabase(TransparentColor,&image->transparent_color,
&image->exception);
GetTimerInfo(&image->timer);
image->ping=MagickFalse;
image->cache=AcquirePixelCache(0);
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=time((time_t *) NULL);
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AllocateSemaphoreInfo();
image->signature=MagickCoreSignature;
/* with no ImageInfo, return the image with defaults only */
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MaxTextExtent);
(void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
if (image_info->size != (char *) NULL)
{
/* "-size WxH+offset": set canvas dimensions and scanline offset */
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
/* "-density RHOxSIGMA"; sigma defaults to rho if omitted */
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->matte_color=image_info->matte_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
(void) SyncImageSettings(image_info,image);
/* "delay" option: '>' caps the delay from above, '<' from below */
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
/* NOTE(review): this branch updates ticks_per_second rather than
delay, mirroring the SigmaValue line below — confirm against
upstream whether image->delay was intended here. */
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
/* Allocate an ImageInfo and initialize it to defaults via GetImageInfo.
Allocation failure is fatal (ThrowFatalException does not return).
Caller owns the result and frees it with DestroyImageInfo. */
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info));
if (image_info == (ImageInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info);
/* on allocation failure, image->next is simply left NULL */
if (GetNextImageInList(image) == (Image *) NULL)
return;
/* the next frame inherits the filename (image_info's takes precedence) */
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MaxTextExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MaxTextExtent);
/* both frames share one blob: drop the fresh blob, reference ours */
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
matte,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
register const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
matte=images->matte;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
/* stack!=0: sum rows, take max column; otherwise sum columns, max rows */
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
{
InheritException(exception,&append_image->exception);
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
/* mixed colorspaces are reconciled by converting the result to sRGB */
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace);
append_image->depth=depth;
append_image->matte=matte;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
/* honor the per-image gravity when placing each frame */
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict append_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
/* a failed row elsewhere aborts remaining rows without break
(OpenMP loops cannot break) */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
for (x=0; x < (ssize_t) next->columns; x++)
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,OpaqueOpacity);
if (next->matte != MagickFalse)
SetPixelOpacity(q,GetPixelOpacity(p));
if ((next->colorspace == CMYKColorspace) &&
(append_image->colorspace == CMYKColorspace))
SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
p++;
q++;
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
/* advance the insertion point for the next frame */
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() scans an image sequence for exceptions.  If any are
% found, it determines the most severe exception, reports it as a warning or
% error depending on the severity, and returns that severity to the caller.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception_info;

  ExceptionType
    highest_severity;

  /*
    Gather any pending exceptions from the image sequence, report the most
    severe one, and hand its severity back to the caller.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception_info=AcquireExceptionInfo();
  GetImageException(image,exception_info);
  CatchException(exception_info);
  highest_severity=exception_info->severity;
  exception_info=DestroyExceptionInfo(exception_info);
  return(highest_severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information the image contains.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  /*
    Convenience wrapper: clip the image to its first embedded clipping path.
  */
  return(ClipImagePath(image,"#1",MagickTrue));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  /*
    Set the image clip mask from the named 8BIM clipping-path property.
    Returns MagickTrue on success, MagickFalse if the path does not exist
    or cannot be rendered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Look up the clipping path stored as an "8BIM:1999,2998:<name>" property.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the clipping path blob into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          /* fix: destroy the mask before returning so it is not leaked */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  /*
    SetImageClipMask() takes its own reference; release ours.
  */
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;

  Image
    *clone_image;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* A zero-sized source image cannot be cloned. */
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Allocate and zero the clone, then copy the scalar members and deep-clone
    the profile/property/artifact maps.
  */
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) memset(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  /* By default the clone stays linked into the source's image list. */
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /* Detach: unlink from the list and give the clone its own blob. */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      /* One extra slot is allocated beyond image->colors. */
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) memcpy(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  /*
    columns == 0 or rows == 0 requests an exact copy: clone the clip/mask
    images and share the pixel cache by reference.
  */
  if ((columns == 0) || (rows == 0))
    {
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /* Same dimensions requested: keep the clip/mask images as well. */
  if ((columns == image->columns) && (rows == image->rows))
    {
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    New dimensions: scale the page and tile-offset geometry proportionally,
    then resize the (undefined) pixel cache to the requested extent.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    Start from a default-initialized ImageInfo; a NULL source simply returns
    the defaults.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /* Scalar members are copied by assignment. */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /* String members are deep-copied only when non-NULL. */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The cache pointer is first copied, then replaced with a reference-counted
    reference when non-NULL.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene; /* deprecated */
  clone_info->subrange=image_info->number_scenes; /* deprecated */
  clone_info->channel=image_info->channel;
  /* Debug flag reflects the current logging state, not the source's flag. */
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image as defined by the
% geometry to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /* The destination rectangle must lie entirely within the image. */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    register const IndexPacket
      *magick_restrict source_indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* If another row already failed, skip the remaining work. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* Copy one scanline; the index channel is copied only for CMYK. */
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      *q=(*p);
      if (image->colorspace == CMYKColorspace)
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress runs to geometry->height iterations but is
           reported against image->rows — confirm the intended span. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImagePixels)
#endif
        proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Decrement the reference count under the semaphore; only the last
     reference actually frees the structure. */
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  /* Poison the signature to catch use-after-free; always returns NULL. */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every dynamically-allocated member of the image-info structure,
    then the structure itself.  Always returns NULL.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Free the optional string members.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(image_info->authenticate);
  /*
    Free the options map, pixel-cache reference, and profile.
  */
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /*
    Poison the signature so use-after-free is detectable, then release the
    structure.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Disassociate the image from its stream; the actual work is delegated to
    DisassociateBlob().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return a clone of the image clip mask, or NULL if no clip mask is set.
    The caller owns (and must destroy) the returned image.
  */
  assert(image != (const Image *) NULL);
  /* Validate the signature before any other member access, consistent with
     the rest of this file. */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (image->clip_mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *p;

  /*
    Walk the image list, folding the most severe pending exception into the
    caller's exception and clearing each image's own exception as we go.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->exception.severity == UndefinedException)
      continue;
    if (p->exception.severity > exception->severity)
      InheritException(exception,&p->exception);
    p->exception.severity=UndefinedException;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *option;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) memset(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /* Honor the MAGICK_SYNCHRONIZE environment override, if present. */
  option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (option != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(option);
      option=DestroyString(option);
    }
  /*
    Resolve the default background, border, matte & transparent colors.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /*
    Return the image-info file member.  Validate the argument as every other
    accessor in this file does.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Return a clone of the image mask, or NULL if no mask is set.  The caller
    owns (and must destroy) the returned image.
  */
  assert(image != (const Image *) NULL);
  /* Validate the signature before any other member access, consistent with
     the rest of this file. */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (image->mask == (Image *) NULL)
    return((Image *) NULL);
  return(CloneImage(image->mask,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  size_t
    channels;

  /*
    Report the number of pixel channels associated with the image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channels=image->channels;
  return(channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  /*
    Snapshot the reference count while holding the image semaphore so the
    read cannot interleave with a concurrent reference update.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    Report the image's virtual-pixel method, as recorded in its pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  ssize_t
    field_width,
    offset;

  /*
    Substitute embedded specifiers in a filename template:
      %d / %o / %x (with an optional zero-padded field width, e.g. %03d)
        substitute the numeric 'value';
      %[filename:...] substitutes an image property, artifact, or option of
        that name;
      %% collapses to a literal percent sign.
    If no substitution was performed the format is copied back verbatim.
    'offset' tracks how far 'filename' has drifted from 'format' as
    substitutions change the string length.
  */
  canonical=MagickFalse;
  offset=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    /* %% is an escaped percent: skip it here, collapsed after the loop. */
    if (*q == '%')
      {
        p=q+1;
        continue;
      }
    /* Parse an optional zero-padded field width (e.g. the 03 of %03d). */
    field_width=0;
    if (*q == '0')
      field_width=(ssize_t) strtol(q,&q,10);
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Format 'value' in place of the specifier: temporarily terminate
          the specifier, printf it into the output, then re-append the rest.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format-offset),(size_t)
          (MaxTextExtent-(p-format-offset)),p,value);
        /* NOTE(review): the constant 4 appears to assume a default 4-digit
           expansion of the specifier — confirm. */
        offset+=(4-field_width);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /* Extract the bracketed pattern, honoring nested brackets. */
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /* Only "filename:" patterns are substituted here. */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /* Resolve the value: property, then artifact, then option. */
        value=(const char *) NULL;
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) &&
            (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        /* Splice the value over the %[...] span, then re-append the tail. */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-offset),value,(size_t)
          (MaxTextExtent-(p-format-offset)));
        offset+=strlen(pattern)-4;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /* Collapse every remaining %% escape to a single %. */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  /* No substitutions at all: return the format unchanged. */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* without HDRI support, pixel values are always integral and in range */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;  /* MagickTrue while no out-of-range component has been seen */

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already decided the outcome */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        /* NOTE(review): a cache read failure also clears status, which the
           final ternary below reports as MagickTrue (HDR) — confirm intent */
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /* a component is HDR if negative, above QuantumRange, or non-integral */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;  /* early break above: HDR component found */
  }
  image_view=DestroyCacheView(image_view);
  /* status cleared => at least one HDR component => return MagickTrue */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *next;

  /*
    Walk the image list; every frame must carry a valid core signature.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MaxTextExtent],
    filename[MaxTextExtent];

  register const Image
    *next;

  /*
    The sequence is tainted if any frame has its taint flag set, or if any
    frame's magick or filename differs from the head image's.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->taint != MagickFalse) ||
        (LocaleCompare(next->magick,magick) != 0) ||
        (LocaleCompare(next->filename,filename) != 0))
      return(MagickTrue);
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /* sole owner: no copy-on-write clone needed */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  /*
    Bug fix: if the clone fails (details in exception), keep *image and its
    reference count intact instead of dropping a reference and storing NULL.
  */
  if (clone_image == (Image *) NULL)
    return(MagickFalse);
  /* release our reference to the shared image and adopt the private clone */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  image=AcquireImage(image_info);
  /* adopt the requested geometry and the background's pixel traits */
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip the remaining iterations */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* flood the row with the background color */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);  /* DestroyImage returns NULL */
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* bump the reference count under the image semaphore for thread safety */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Parse the page specification and update the canvas geometry and offsets.
  */
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;  /* height defaults to width */
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) == 0)
    {
      /*
        Absolute position: replace the canvas offsets, growing a zero-size
        canvas to cover a positive offset.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  else
    {
      /*
        Relative position (aspect flag): adjust the existing offsets.
      */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() reset the image pixels, that is, all the pixel components
% are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const void
    *pixels;

  MagickBooleanType
    status;

  MagickSizeType
    length;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  pixels=AcquirePixelCachePixels(image,&length,exception);
  if (pixels != (void *) NULL)
    {
      /*
        Reset in-core image pixels.
      */
      /* NOTE(review): const is cast away here — assumes the in-core cache
         buffer returned above is writable; confirm against the cache API */
      (void) memset((void *) pixels,0,(size_t) length);
      return(MagickTrue);
    }
  /*
    Reset image pixels.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip the remaining iterations */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      (void) memset(q,0,sizeof(PixelPacket));
      /* colormapped and CMYK images also carry a per-pixel index channel */
      if ((image->storage_class == PseudoClass) ||
          (image->colorspace == CMYKColorspace))
        indexes[x]=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* a non-gray background in a gray colorspace forces a move to RGB */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* a translucent background requires an alpha channel */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /* build the packed pixel once; the loop below just copies it */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip the remaining rows */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        /* CMYK stores the black channel in the index queue */
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixel channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /*
    Record the number of pixel channels associated with the image.
    Consistency fix: validate the image handle before dereferencing it,
    as every sibling setter in this file does.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /* adopt the color's pixel traits before flooding the canvas */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip the remaining iterations */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  /* re-synchronize the pixel cache with the new storage class */
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* the mask must match the image geometry (macro returns MagickFalse) */
  if (clip_mask != (const Image *) NULL)
    if ((clip_mask->columns != image->columns) ||
        (clip_mask->rows != image->rows))
      ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* release any previous clip path */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);  /* a NULL mask simply clears the clip path */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* the image keeps its own private copy of the mask */
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows)
{
  /*
    Set the image size (columns & rows) and re-synchronize the pixel cache.
    Consistency fix: validate the image handle before it is dereferenced
    (the exception macro below reads image->filename), matching the asserts
    used by every sibling setter in this file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowBinaryImageException(ImageError,"NegativeOrZeroImageSize",
      image->filename);
  image->columns=columns;
  image->rows=rows;
  /* depth must fit within the widest supported quantum */
  if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
    ThrowBinaryImageException(ImageError,"ImageDepthNotSupported",
      image->filename);
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;  /* lookups whose failures are deliberately ignored */

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];  /* header bytes for signature detection */

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          /* not a scene list; treat a plain geometry as an extract region */
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          /* parse comma/space separated scene ranges (e.g. 2-5,9) and
             widen [scene, number_scenes] to cover them all */
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* convert the inclusive end into a count of scenes */
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* for compressed files, look through to the inner extension */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if ((*extension != '\0') && (IsGlob(extension) == MagickFalse))
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,filename);
      (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      /* honor the prefix only when a coder or delegate can handle it */
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          GetPathComponent(image_info->filename,CanonicalPath,filename);
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);  /* format already pinned down explicitly */
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to a seekable temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* read the header bytes, then rewind so a coder can re-read them */
      (void) memset(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /* signature matched: adopt the detected format */
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* NOTE(review): the blob is stored without copying, so the caller
     presumably retains ownership of the memory — verify against callers */
  image_info->blob=(void *) blob;
  image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Point the image info at an already-open stdio stream.  Ownership of the
    FILE stays with the caller; this routine only records the handle.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  /*
    Associate a clip mask with the image.  A non-NULL mask must have exactly
    the same dimensions as the image; passing NULL simply removes any
    existing mask.  The mask is deep-cloned, so the caller keeps ownership
    of its copy.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if ((mask != (const Image *) NULL) &&
      ((mask->columns != image->columns) || (mask->rows != image->rows)))
    ThrowBinaryImageException(ImageError,"ImageSizeDiffers",image->filename);
  /* Drop any previously attached mask before installing the new one. */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (Image *) NULL)
    return(MagickTrue);
  /* A masked image must be DirectClass so per-pixel edits are possible. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  return(image->mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  /*
    Set every pixel's opacity channel to the given level (0 is fully opaque,
    QuantumRange fully transparent).  Rows are processed in parallel when
    OpenMP support is compiled in.
  */
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* The image now carries an alpha/matte channel. */
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    /* Skip remaining rows once any thread has recorded a failure. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  /*
    Select how pixel accesses outside the image bounds ("virtual pixels")
    are resolved; the previous method is returned to the caller.  The real
    work is delegated to the pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  /*
    Measure, row by row, how much transparent space exists between the
    right edge of the previous image and the left edge of the current one,
    and return how far the current image may be slid left (the measured
    transparent gap minus the requested minimum offset).
  */
  CacheView
    *left_view,
    *right_view;
  const Image
    *left_image,
    *right_image;
  RectangleInfo
    left_geometry,
    right_geometry;
  register const PixelPacket
    *p;
  register ssize_t
    i,
    y;
  size_t
    gap;
  ssize_t
    x;
  /* First image in the list: nothing to smush against. */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Count transparent pixels inward from the left image's right edge. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Count transparent pixels inward from the right image's left edge. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* The smallest combined transparent run over all rows bounds the gap. */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the outer loop has no break, so y == rows here and this
     branch looks unreachable — presumably defensive; confirm upstream. */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  /*
    Vertical analogue of SmushXGap(): measure, column by column, the
    transparent space between the bottom edge of the previous image and the
    top edge of the current one, and return how far the current image may be
    slid up (the measured gap minus the requested minimum offset).
  */
  CacheView
    *bottom_view,
    *top_view;
  const Image
    *bottom_image,
    *top_image;
  RectangleInfo
    bottom_geometry,
    top_geometry;
  register const PixelPacket
    *p;
  register ssize_t
    i,
    x;
  size_t
    gap;
  ssize_t
    y;
  /* First image in the list: nothing to smush against. */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Count transparent pixels upward from the top image's bottom edge. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Count transparent pixels downward from the bottom image's top edge. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    /* The smallest combined transparent run over all columns bounds the gap. */
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the outer loop has no break, so x == columns here and this
     branch looks unreachable — presumably defensive; confirm upstream. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"
  CacheView
    *smush_view;
  const Image
    *image;
  Image
    *smush_image;
  MagickBooleanType
    matte,
    proceed,
    status;
  MagickOffsetType
    n;
  RectangleInfo
    geometry;
  register const Image
    *next;
  size_t
    height,
    number_images,
    width;
  ssize_t
    x_offset,
    y_offset;
  /*
    Smush all images from `images' to the end of the list into one canvas:
    top-to-bottom when `stack' is set, otherwise left-to-right, removing the
    transparent gap between neighbors (see SmushXGap/SmushYGap) while keeping
    at least `offset' pixels between them.  Returns the new image, or NULL on
    failure (the error is reported in `exception').

    First compute the maximum area the smushed canvas could need.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /*
      Accumulate the composite status rather than overwrite it: previously a
      failure on any image but the last was silently discarded.
    */
    if (CompositeImage(smush_image,OverCompositeOp,image,x_offset,
        y_offset) == MagickFalse)
      status=MagickFalse;
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually covered by the composites. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    {
      /* Propagate the composite error so the caller sees why we failed. */
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
    }
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  /*
    Strip an image of all profiles and comments: removes every embedded
    profile, the comment and date properties, and asks the PNG encoder to
    exclude metadata chunks via the "png:exclude-chunk" artifact.
  */
  MagickBooleanType
    status;
  assert(image != (Image *) NULL);
  /* Signature check added for consistency with every sibling method. */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index: out-of-range values are clamped to 0 and
    flagged through *range_exception so the caller can warn once.
  */
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
  /*
    Initialize the red, green, and blue intensity of each pixel from the
    colormap entry selected by the pixel's index.  Only meaningful for
    PseudoClass images; rows are processed in parallel under OpenMP.
  */
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    range_exception,
    status,
    taint;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Ping images carry no pixel data, so there is nothing to sync. */
  if (image->ping != MagickFalse)
    return(MagickTrue);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelPacket *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* Preserve the taint flag: syncing is not a user-visible modification. */
  taint=image->taint;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;
    register IndexPacket
      *magick_restrict indexes;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Clamp the index; out-of-range entries raise a warning below. */
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      /* SetPixelRgb expands to a statement — no semicolon before `else'. */
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index)
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  /*
    Apply SyncImageSettings() to every image in the list, then drop the
    "page" option so it is not re-applied to images read later.
  */
  Image
    *image;
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (image=images; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  /*
    Copy per-image options from the image info into the corresponding image
    attributes (background color, density, compression, chromaticity, ...).
    Unrecognized options are preserved as image artifacts at the end.
  */
  char
    property[MaxTextExtent];
  const char
    *option,
    *value;
  GeometryInfo
    geometry_info;
  MagickStatusType
    flags;
  ResolutionType
    units;
  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* Geometry "rho x sigma" maps to chromaticity x,y; y defaults to x. */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;
      /*
        Set image density.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;
      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* An explicit image_info quality overrides the "quality" option above. */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;
      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  else
    units = image_info->units;
  if (units != UndefinedResolution)
    {
      /* Convert the stored resolution when the units change (2.54 cm/inch). */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->x_resolution/=2.54;
                image->y_resolution/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* Round to two decimal places. */
                image->x_resolution=(double) ((size_t) (100.0*2.54*
                  image->x_resolution+0.5))/100.0;
                image->y_resolution=(double) ((size_t) (100.0*2.54*
                  image->y_resolution+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
    }
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /* Mirror every remaining option into the image's artifact list. */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
|
GB_unop__one_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__one_int8_int8)
// op(A') function: GB (_unop_tran__one_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1 for every entry of A (the ONE operator ignores the value of A).
// Auto-generated kernel: do not hand-edit the logic; see the file preamble.
GrB_Info GB (_unop_apply__one_int8_int8)
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/hypersparse/full case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // GB_GETA and GB_CAST expand to no-ops for the ONE operator
            ; ;
            ; ;
            Cx [p] = 1 ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            ; ;
            ; ;
            Cx [p] = 1 ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template applies the ONE operator
// while transposing; this wrapper only supplies the type-specific macros.
GrB_Info GB (_unop_tran__one_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRACC_OMP_045_Wrong_ordered_clause_simd_Inter_yes.c | /*
Data race between the values in countervar, leading to changing results with simd. By utilising the ordered construct, the execution would be sequentially consistent.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define N 42000
#define C 16
int countervar[N];
int init(){
  /* Zero the shared counter array before the offloaded kernel runs. */
  for (int i = 0; i < N; ++i)
    countervar[i] = 0;
  return 0;
}
int count(){
  /*
    Offloaded counting kernel with a loop-carried dependence of distance C
    (countervar[i] reads countervar[i-C]).  NOTE(review): this benchmark
    intentionally exhibits a memory-access issue — the simd/teams clauses do
    not enforce the dependence ordering — so it must NOT be "fixed"; the
    race is the point of the test case.
  */
  #pragma omp target map(tofrom:countervar[0:N]) device(0)
  #pragma omp teams distribute simd safelen(C)
  for (int i=C; i<N; i++){
    countervar[i]=countervar[i-C]+1;
  }
  return 0;
}
int check(){
  /*
    Compare each counter against its expected sequential value (i-C+1) and
    report whether any mismatch — i.e. a visible memory-access issue — was
    detected during the offloaded run.
  */
  bool issue_found = false;
  for (int i = C; i < N; ++i)
    if (countervar[i] != i-C+1)
      issue_found = true;
  printf("Memory Access Issue visible: %s\n",issue_found ? "true" : "false");
  return 0;
}
int main(){
  /* Drive the benchmark: initialize, run the offloaded count, then verify. */
  init();
  count();
  check();
  return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.