source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
beam_vdif.c | /********************************************************
* *
* Licensed under the Academic Free License version 3.0 *
* *
********************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fftw3.h>
#include "vdifio.h"
#include "psrfits.h"
#include "slamac.h"
#include "beam_common.h"
#include "beam_vdif.h"
#include "mwa_header.h"
#include "vdifio.h"
#include "ascii_header.h"
#include "filter.h"
#include "mycomplex.h"
#ifndef HAVE_CUDA
#include <omp.h>
#endif
void vdif_write_second( struct vdifinfo *vf, vdif_header *vhdr,
                        float *data_buffer_vdif )
/* Pack one second's worth of beamformed float samples into VDIF frames
 * (header + 8-bit offset-binary payload) and append them to the output file.
 *
 *   vf               - VDIF output description (frame/block sizes, file name)
 *   vhdr             - current VDIF header; advanced frame-by-frame via
 *                      nextVDIFHeader()
 *   data_buffer_vdif - vf->frame_rate frames of vf->sizeof_beam floats each
 */
{
    float  *data_buffer_ptr = data_buffer_vdif;
    size_t  offset_out_vdif = 0;

    int8_t *out_buffer_8_vdif = (int8_t *)malloc( vf->block_size );
    // Fix: the original dereferenced the buffer without checking the
    // allocation succeeded
    if (out_buffer_8_vdif == NULL)
    {
        fprintf( stderr, "error: vdif_write_second: failed to allocate "
                         "%zu bytes\n", (size_t)vf->block_size );
        exit(EXIT_FAILURE);
    }

    while (offset_out_vdif < vf->block_size)
    {
        // Add the current header
        memcpy( (out_buffer_8_vdif + offset_out_vdif), vhdr, VDIF_HEADER_SIZE );

        // Offset into the output array
        offset_out_vdif += VDIF_HEADER_SIZE;

        // Convert from float to int8, then to offset binary as required by
        // the VDIF payload format
        float2int8_trunc( data_buffer_ptr, vf->sizeof_beam, -126.0, 127.0,
                          (out_buffer_8_vdif + offset_out_vdif) );
        to_offset_binary( (out_buffer_8_vdif + offset_out_vdif),
                          vf->sizeof_beam );

        offset_out_vdif += vf->frame_length - VDIF_HEADER_SIZE; // increment output offset
        data_buffer_ptr += vf->sizeof_beam;

        nextVDIFHeader( vhdr, vf->frame_rate );
    }

    // Write a full second's worth of samples
    vdif_write_data( vf, out_buffer_8_vdif );
    free( out_buffer_8_vdif );
}
void vdif_write_data( struct vdifinfo *vf, int8_t *output )
/* Append one block of packed VDIF frames to <basefilename>.vdif and
 * (re)write the matching DSPSR-compatible ASCII header <basefilename>.hdr.
 */
{
    // form the filename
    // there is a standard naming convention
    char filename[1030];
    // Fix: use snprintf so a long basefilename cannot overflow the buffer
    snprintf( filename, sizeof(filename), "%s.vdif", vf->basefilename );

    //fprintf(stderr,"Attempting to open VDIF file for writing: %s\n",filename);
    FILE *fs = fopen( filename, "a" );
    // Fix: the original called fwrite/fclose on a possibly-NULL stream
    if (fs == NULL)
    {
        fprintf( stderr, "error: vdif_write_data: could not open '%s' for "
                         "writing\n", filename );
        exit(EXIT_FAILURE);
    }
    if (fwrite( output, vf->block_size, 1, fs ) != 1)
        fprintf( stderr, "warning: vdif_write_data: short write to '%s'\n",
                 filename );
    fclose( fs );

    // write a CPSR2 test header for DSPSR
    char ascii_header[MWA_HEADER_SIZE] = MWA_HEADER_INIT;

    //ascii_header_set( ascii_header, "UTC_START", "%s", vf->date_obs );
    ascii_header_set( ascii_header, "DATAFILE",   "%s", filename );
    ascii_header_set( ascii_header, "INSTRUMENT", "%s", "VDIF" );
    ascii_header_set( ascii_header, "TELESCOPE",  "%s", vf->telescope );
    ascii_header_set( ascii_header, "MODE",       "%s", vf->obs_mode );
    ascii_header_set( ascii_header, "FREQ",       "%f", vf->fctr );
    ascii_header_set( ascii_header, "BW",         "%f", vf->BW );
    ascii_header_set( ascii_header, "RA",         "%s", vf->ra_str );
    ascii_header_set( ascii_header, "DEC",        "%s", vf->dec_str );
    ascii_header_set( ascii_header, "SOURCE",     "%s", vf->source );

    snprintf( filename, sizeof(filename), "%s.hdr", vf->basefilename );
    fs = fopen( filename, "w" );
    if (fs == NULL)
    {
        fprintf( stderr, "error: vdif_write_data: could not open '%s' for "
                         "writing\n", filename );
        exit(EXIT_FAILURE);
    }
    fwrite( ascii_header, MWA_HEADER_SIZE, 1, fs );
    fclose( fs );
}
void populate_vdif_header(
        struct vdifinfo *vf,
        vdif_header     *vhdr,
        char            *metafits,
        char            *obsid,
        char            *time_utc,
        int              sample_rate,
        long int         frequency,
        int              nchan,
        long int         chan_width,
        char            *rec_channel,
        struct delays   *delay_vals,
        int              npointing )
/* Fill in one vdifinfo struct per pointing and initialise the shared VDIF
 * header (frame geometry, start time, source coordinates, output filename).
 *
 *   vf          - array of npointing vdifinfo structs to populate
 *   vhdr        - VDIF header initialised via createVDIFHeader/setVDIF*
 *                 (NOTE(review): written once per pointing; the last
 *                 iteration's values win — confirm this is intended)
 *   metafits    - path to the MWA metafits file (read for PROJECT key)
 *   delay_vals  - per-pointing delay/pointing info; index p is used for
 *                 RA/Dec, but element 0 is used for the start time
 */
{
    for ( int p=0; p<npointing; p++ )
    {
        // First how big is a DataFrame
        vf[p].bits              = 8;   // this is because it is all the downstream apps support (dspsr/diFX)
        vf[p].iscomplex         = 1;   // (it is complex data)
        vf[p].nchan             = 2;   // I am hardcoding this to 2 channels per thread - one per pol
        vf[p].samples_per_frame = 128; // also hardcoding to 128 time-samples per frame
        vf[p].sample_rate       = sample_rate*128; // = 1280000 (also hardcoding this to the raw channel rate)
        vf[p].BW                = 1.28;

        vf[p].frame_length = (vf[p].nchan * (vf[p].iscomplex+1) * vf[p].samples_per_frame) +
                             VDIF_HEADER_SIZE; // = 544
        vf[p].threadid = 0;
        sprintf( vf[p].stationid, "mw" );

        vf[p].frame_rate = sample_rate;                           // = 10000
        vf[p].block_size = vf[p].frame_length * vf[p].frame_rate; // = 5440000

        // A single frame (128 samples). Remember vf.nchan is kludged to npol
        vf[p].sizeof_beam = vf[p].samples_per_frame * vf[p].nchan * (vf[p].iscomplex+1); // = 512

        // One full second (1.28 million 2 bit samples)
        vf[p].sizeof_buffer = vf[p].frame_rate * vf[p].sizeof_beam; // = 5120000

        createVDIFHeader( vhdr, vf[p].frame_length, vf[p].threadid, vf[p].bits, vf[p].nchan,
                          vf[p].iscomplex, vf[p].stationid);

        // Now we have to add the time
        // NOTE(review): start time always comes from delay_vals[0]
        // (delay_vals->intmjd), while RA/Dec below use delay_vals[p] —
        // presumably all pointings share one start time; confirm.
        uint64_t start_day = delay_vals->intmjd;
        uint64_t start_sec = roundf( delay_vals->fracmjd * 86400.0 );
        uint64_t mjdsec    = (start_day * 86400) + start_sec; // Note the VDIFEpoch is strange - from the standard

        setVDIFEpoch( vhdr, start_day );
        setVDIFMJDSec( vhdr, mjdsec );
        setVDIFFrameNumber( vhdr, 0 );

        // Get the project ID directly from the metafits file
        // NOTE(review): the cfitsio status code is never checked; on failure
        // exp_name is left unset — consider checking status.
        fitsfile *fptr = NULL;
        int status     = 0;

        fits_open_file(&fptr, metafits, READONLY, &status);
        fits_read_key(fptr, TSTRING, "PROJECT", vf[p].exp_name, NULL, &status);
        fits_close_file(fptr, &status);

        // NOTE(review): strncpy with n=17 does not NUL-terminate if obsid is
        // 17+ chars — verify the size of scan_name vs obsid length.
        strncpy( vf[p].scan_name, obsid, 17 );

        vf[p].b_scales  = (float *)malloc( sizeof(float) * vf[p].nchan );
        vf[p].b_offsets = (float *)malloc( sizeof(float) * vf[p].nchan );
        vf[p].got_scales = 1;

        strncpy( vf[p].telescope, "MWA", 24);
        strncpy( vf[p].obs_mode,  "PSR", 8);

        // Determine the RA and Dec strings
        double ra2000  = delay_vals[p].mean_ra  * DR2D;
        double dec2000 = delay_vals[p].mean_dec * DR2D;

        dec2hms(vf[p].ra_str,  ra2000/15.0, 0); // 0 = no '+' sign
        dec2hms(vf[p].dec_str, dec2000,     1); // 1 = with '+' sign

        strncpy( vf[p].date_obs, time_utc, 24);

        vf[p].MJD_epoch = delay_vals->intmjd + delay_vals->fracmjd;
        vf[p].fctr      = (frequency + (nchan/2.0)*chan_width)/1.0e6; // (MHz)
        strncpy( vf[p].source, "unset", 24 );

        // The output file basename: <project>_<obsid>_<ra>_<dec>_ch<NNN>
        int ch = atoi(rec_channel);
        sprintf( vf[p].basefilename, "%s_%s_%s_%s_ch%03d",
                 vf[p].exp_name, vf[p].scan_name, vf[p].ra_str, vf[p].dec_str, ch);
    }
}
ComplexFloat get_std_dev_complex(ComplexFloat *input, int nsamples)
/* Standard deviation of the real and imaginary parts, computed separately
 * under a zero-mean assumption, using the (n-1) sample normalisation.
 * Returned as a complex number: (sigma_real, sigma_imag).
 */
{
    float re_sumsq = 0.0f;
    float im_sumsq = 0.0f;

    for (int i = 0; i < nsamples; i++)
    {
        float re = CRealf(input[i]);
        float im = CImagf(input[i]);
        re_sumsq += re*re;
        im_sumsq += im*im;
    }

    float re_sigma = sqrtf((1.0/(nsamples-1))*re_sumsq);
    float im_sigma = sqrtf((1.0/(nsamples-1))*im_sumsq);

    return CMakef( re_sigma, im_sigma );
}
void set_level_occupancy(ComplexFloat *input, int nsamples, float *new_gain)
/* Count how many samples would clip at +/-127 after applying the current
 * gain and warn if any do. Aborts on NaN input. The automatic gain
 * reduction that once lived here was removed pending a robust solution,
 * so the gain is currently passed back unchanged.
 */
{
    float gain     = *new_gain;
    int   nclipped = 0;

    for (int i = 0; i < nsamples; i++)
    {
        // NaN input indicates an upstream error: abort loudly
        if (isnan(CRealf(input[i])) || isnan(CImagf(input[i])))
        {
            fprintf( stderr, "error: set_level_occupancy: input[%d] = "
                             "NaN\n", i );
            exit(EXIT_FAILURE);
        }

        // Would this sample clip after scaling?
        if (fabs(gain*CRealf(input[i])) > 127 || fabs(gain*CImagf(input[i])) > 127 )
            nclipped++;
    }

    float percentage_clipped = ((float) nclipped/nsamples) * 100;

    if (nclipped > 0)
    {
        fprintf(stdout,"warning: percentage samples clipped %f percent\n",percentage_clipped);
    }

    *new_gain = gain;
}
void get_mean_complex( ComplexFloat *input, int nsamples, float *rmean,
                       float *imean, ComplexFloat *cmean)
/* Arithmetic means of the real parts, imaginary parts, and of the complex
 * samples themselves, written to *rmean, *imean and *cmean respectively.
 */
{
    float        re_sum = 0.0f;
    float        im_sum = 0.0f;
    ComplexFloat c_sum  = CMakef( 0.0, 0.0 );

    for (int i = 0; i < nsamples; i++)
    {
        re_sum += CRealf( input[i] );
        im_sum += CImagf( input[i] );
        c_sum   = CAddf( c_sum, input[i] );
    }

    *rmean = re_sum / nsamples;
    *imean = im_sum / nsamples;
    *cmean = CSclf( c_sum, 1.0 / (float)nsamples );
}
void normalise_complex(ComplexFloat *input, int nsamples, float scale)
/* Scale every sample in place by the given factor. */
{
    for (int i = 0; i < nsamples; i++)
        input[i] = CSclf( input[i], scale );
}
void to_offset_binary(int8_t *i, int n)
/* Convert n two's-complement samples to offset-binary in place by
 * flipping each sample's sign bit (x -> x XOR 0x80).
 */
{
    for (int j = 0; j < n; j++)
        i[j] ^= 0x80;
}
#ifndef HAVE_CUDA
void invert_pfb_ifft( ComplexDouble ***detected_beam, int file_no,
                      int nsamples, int nchan, int npol,
                      float *data_buffer_vdif )
/* "Invert the PFB" by simply applying an inverse FFT.
 * This function expects "detected_beam" to be structured as follows:
 *
 *   detected_beam[2*nsamples][nchan][npol]
 *
 * Although detected_samples potentially contains 2 seconds' worth of data,
 * this function only FFTs one second. The appropriate second is worked out
 * using file_no: if it is even, the first half of detected_beam is used,
 * if odd, the second half.
 *
 * The output of the inverse FFT is packed back into data_buffer_vdif, a 1D
 * array whose ordering is as follows:
 *
 *   time, pol, complexity
 *
 * This ordering is suited for immediate output to the VDIF format.
 */
{
    // Allocate FFTW arrays
    // NOTE(review): fftwf_malloc result is not checked for NULL — confirm
    // allocation failure is acceptable to crash on here.
    int arr_size = nsamples * nchan * npol;
    fftwf_complex *in = (fftwf_complex *)fftwf_malloc( arr_size * sizeof(fftwf_complex) );

    // Create a plan for doing column-wise 1D transforms: "howmany" batched
    // nchan-point backward FFTs, contiguous in memory, in-place.
    int rank     = 1;
    int n[]      = { nchan };
    int howmany  = nsamples * npol;
    int idist    = nchan;
    int odist    = nchan;
    int istride  = 1;
    int ostride  = 1;
    int *inembed = n, *onembed = n;
    fftwf_plan p = fftwf_plan_many_dft( rank, n, howmany,
                                        in, inembed, istride, idist,
                                        in, onembed, ostride, odist,
                                        FFTW_BACKWARD, FFTW_ESTIMATE );

    // Populate the FFTW arrays such that the middle channel of detected_beam
    // is placed nearest the DC term.
    int s; // sample index
#pragma omp parallel for
    for (s = 0; s < nsamples; s ++)
    {
        int ds, ch, pol;
        int ii;  // "in" index
        int chi; // corrected channel index for "in" array

        // Calculate the proper sample index for this second
        ds = (file_no % 2)*nsamples + s;

        for (ch  = 0; ch  < nchan; ch++ )
        for (pol = 0; pol < npol;  pol++)
        {
            // Swap the two halves of the array (fftshift-style reordering)
            chi = (ch < nchan/2 ? ch + (nchan/2) : ch - (nchan/2));

            // Calculate the "in" index
            ii = nchan * npol * s +
                 nchan * pol +
                 chi;

            // Copy across the data (but set DC bin to 0)
            // NOTE(review): assigns a ComplexDouble into an fftwf_complex
            // element — this relies on fftwf_complex being C99 float complex
            // (i.e. <complex.h> included before fftw3.h) and narrows
            // double->float; confirm against mycomplex.h's definitions.
            in[ii] = (chi == 0 ? 0.0 : detected_beam[ds][ch][pol]);
        }
    }

    /*
    fprintf( stderr, "  First column to be iFFT'd (inside invert_pfb_ifft()): [\n" );
    for (s = 0; s < nchan; s++)
        fprintf( stderr, "  %f + %f*I\n", creal(in[s]), cimag(in[s]) );
    fprintf( stderr, "]\n" );
    */

    // Execute the FFT
    fftwf_execute( p );

    // Pack result into the output array
#pragma omp parallel for
    for (s = 0; s < nsamples; s ++)
    {
        int ch, pol;
        int ii, oi; // "in" index & "out" index

        for (ch  = 0; ch  < nchan; ch++ )
        for (pol = 0; pol < npol;  pol++)
        {
            // Calculate the "in" index
            ii = nchan * npol * s +
                 nchan * pol +
                 ch;

            // Calculate the "out" index ("ch" here turns into a subdivision
            // of time)
            oi = 2 * npol * nchan * s +
                 2 * npol * ch +
                 2 * pol;

            // Copy data across, dividing by nchan to account for the lack of
            // normalisation in the FFTW library.
            data_buffer_vdif[oi]   = crealf(in[ii]) / (double)nchan;
            data_buffer_vdif[oi+1] = cimagf(in[ii]) / (double)nchan;
        }
    }

    // Clean up
    fftwf_free( in );
    fftwf_destroy_plan( p );
}
void invert_pfb_ord( ComplexDouble ***detected_beam, int file_no,
                     int nsamples, int nchan, int npol,
                     ComplexDouble **fils, int fil_size,
                     float *data_buffer_uvdif )
/* "Invert the PFB" by applying a resynthesis filter.
 * This function expects "detected_beam" to be structured as follows:
 *
 *   detected_beam[2*nsamples][nchan][npol]
 *
 * Although detected_samples potentially contains 2 seconds' worth of data,
 * this function only inverts 1 second. The appropriate second is worked out
 * using file_no: if it is even, the first half of detected_beam is used,
 * if odd, the second half.
 *
 * The output of the inversion is packed back into data_buffer_vdif, a 1D
 * array whose ordering is as follows:
 *
 *   time, pol, complexity
 *
 * This ordering is suited for immediate output to the VDIF format.
 *
 * Finally, fils points to a 2D array of filter coefficients, each row of
 * which has been "rotated" with phase ramps of different amounts. It is
 * assumed that fils has size:
 *
 *   fils[nchan][fil_size]
 */
{
    // Set the output buffer to zeros
    int s;
#pragma omp parallel for
    for (s = 0; s < npol*nchan*nsamples*2; s++)
    {
        data_buffer_uvdif[s] = 0.0;
    }

    // Loop over (output) sample -- embarassingly parallel
#pragma omp parallel for
    for (s = 0; s < nchan*nsamples; s++)
    {
        //fprintf( stderr, "  Thread num: %d, s = %d\n", omp_get_thread_num(), s );
        int U  = nchan;         // upsampling factor = number of channels
        int i0;                 // The index of the first input sample to
                                // be included in the output sum
        int f0;                 // The index of the first filter coeffi-
                                // cient to be included in the output sum
        int N  = nsamples * U;  // The total number of output samples
        int ch, f, i, pol, oi;  // Various loop counters
        ComplexDouble part;     // One filter-tap * input-sample product

        for (pol = 0; pol < npol; pol++)
        {
            // Calculate the output index for data_buffer_uvdif
            oi = 2*npol*s + 2*pol;

            // First take care of the corner case = the very first second,
            // where there is no previous second to draw history from (the
            // output was already zeroed above, so just skip).
            if (file_no == 0 && s < fil_size - 1)
            {
                //data_buffer_uvdif[oi  ] = 0.0; // "real"
                //data_buffer_uvdif[oi+1] = 0.0; // "imag"
                continue;
            }

            // Calculate the first input idx to be included in this out sample
            // (even file_no: wrap around the 2-second ring buffer)
            if (file_no % 2 == 0)
                i0 = ((s + 2*N - fil_size + U) / U) % (2*nsamples);
            else // file_no % 2 == 1
                i0 = (s + 1*N - fil_size + U) / U;

            // Calculate the first filter coefficient index
            f0 = (U - (s % U) - 1) % U;

            // Loop over channels and filter coefficients to calculate output
            for (ch = 0; ch < nchan; ch++)
            //for (ch = 3; ch < 4; ch++)
            {
                i = i0;
                for (f = f0; f < fil_size; f += U)
                {
                    // Accumulate (reversed) filter tap * input sample
                    part = CMuld( fils[ch][(fil_size-1) - f], detected_beam[i][ch][pol] );
                    data_buffer_uvdif[oi  ] += CReald(part);
                    data_buffer_uvdif[oi+1] += CImagd(part);

                    // Update input index simultaneously with filter coeff
                    i++;
                    if (i == 2*nsamples)  i = 0; // (i.e. loop back around to
                                                 //  the other second)
                } // Loop over relevant filter coefficients
            } // Loop over channels

            // Normalise the result
            data_buffer_uvdif[oi  ] /= nchan;
            data_buffer_uvdif[oi+1] /= nchan;

        } // Loop over X/Y pol
    } // Loop over samples
}
#endif
|
omp_ex_30.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
int main()
/* Demonstrates OpenMP's schedule(runtime) clause: the loop schedule is
 * chosen at run time from the OMP_SCHEDULE environment variable.
 * Prints which thread executed each iteration.
 */
{
    #pragma omp parallel for schedule(runtime)
    for (unsigned int i = 0; i < 16; i++)
    {
        unsigned id = omp_get_thread_num();
        // Fix: i and id are unsigned, so the correct conversion specifier
        // is %u, not %i (mismatched specifier/argument is undefined behavior)
        printf("i = %u from thread: %u\n", i, id);
    }
    return 0;
}
|
GB_unop__creal_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__creal_fp64_fc64)
// op(A') function: GB (_unop_tran__creal_fp64_fc64)
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = creal (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = creal (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = creal (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CREAL || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = creal (Ax): apply the unary operator entrywise.
// Auto-generated code — comments added for review only; do not hand-edit
// files in Generated2/ (regenerate from Generator/ instead).
GrB_Info GB (_unop_apply__creal_fp64_fc64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every slot of Ax holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = creal (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip slots with no entry
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = (aij) ;
            Cx [p] = creal (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = creal (A'): transpose, typecast, and apply the unary operator.
// The actual loop nest lives in the shared template GB_unop_transpose.c,
// which is specialised here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__creal_fp64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
build_HF_mat.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "linalg_lib_wrapper.h"
#include "utils.h"
#include "TinyDFT_typedef.h"
#include "build_HF_mat.h"
#include "ket_sp_list.h"
#include "acc_JKmat.h"
#include "libCMS.h"
void TinyDFT_build_Hcore_S_X_mat(TinyDFT_p TinyDFT, double *Hcore_mat, double *S_mat, double *X_mat)
/* Build the core Hamiltonian matrix (Hcore), the overlap matrix (S), and the
 * basis transformation matrix X = U * D^{-1/2} * U^T (with small overlap
 * eigenvalues removed to eliminate linear dependency in the basis).
 * All three output matrices are nbf x nbf, row-major.
 */
{
    assert(TinyDFT != NULL);

    int nbf             = TinyDFT->nbf;
    int nshell          = TinyDFT->nshell;
    int mat_size        = TinyDFT->mat_size;
    int *shell_bf_sind  = TinyDFT->shell_bf_sind;
    int *shell_bf_num   = TinyDFT->shell_bf_num;
    Simint_p   simint   = TinyDFT->simint;
    BasisSet_p basis    = TinyDFT->basis;

    // Compute core Hamiltonian and overlap matrix, shell pair by shell pair
    memset(Hcore_mat, 0, DBL_MSIZE * mat_size);
    memset(S_mat,     0, DBL_MSIZE * mat_size);
    #pragma omp parallel for schedule(dynamic)
    for (int M = 0; M < nshell; M++)
    {
        int tid = omp_get_thread_num();
        for (int N = 0; N < nshell; N++)
        {
            int nint, offset, nrows, ncols;
            double *integrals, *S_ptr, *Hcore_ptr;

            offset    = shell_bf_sind[M] * nbf + shell_bf_sind[N];
            S_ptr     = S_mat     + offset;
            Hcore_ptr = Hcore_mat + offset;
            nrows     = shell_bf_num[M];
            ncols     = shell_bf_num[N];

            // Compute the contribution of current shell pair to overlap matrix
            // (comment fixed: calc_pair_ovlp fills S, not Hcore)
            CMS_Simint_calc_pair_ovlp(simint, tid, M, N, &integrals, &nint);
            if (nint > 0) copy_matrix_block(sizeof(double), nrows, ncols, integrals, ncols, S_ptr, nbf);

            // Compute the contribution of current shell pair to core Hamiltonian matrix
            CMS_Simint_calc_pair_Hcore(basis, simint, tid, M, N, &integrals, &nint);
            if (nint > 0) copy_matrix_block(sizeof(double), nrows, ncols, integrals, ncols, Hcore_ptr, nbf);
        }
    }

    // Construct basis transformation matrix
    double *workbuf = (double*) malloc(sizeof(double) * (2 * mat_size + nbf));
    assert(workbuf != NULL);
    double *U_mat  = workbuf;
    double *U0_mat = U_mat  + mat_size;
    double *eigval = U0_mat + mat_size;

    // [U, D] = eig(S);
    memcpy(U_mat, S_mat, DBL_MSIZE * mat_size);
    LAPACKE_dsyev(LAPACK_ROW_MAJOR, 'V', 'U', nbf, U_mat, nbf, eigval);  // U_mat will be overwritten by eigenvectors

    // X = U * D^{-1/2} * U'^T
    memcpy(U0_mat, U_mat, DBL_MSIZE * mat_size);
    int cnt = 0;
    double S_ev_thres = 1.0e-5;  // This is the default threshold used in NWChem
    for (int i = 0; i < nbf; i++)
    {
        // Keep only well-conditioned eigen-directions; zero out the rest
        if (eigval[i] >= S_ev_thres)
        {
            eigval[i] = 1.0 / sqrt(eigval[i]);
            cnt++;
        } else {
            eigval[i] = 0.0;
        }
    }
    printf("Eliminating linear dependency in overlap matrix S: %d eigenvalues < %.2e are removed\n", nbf - cnt, S_ev_thres);

    // Scale the columns of U0 by D^{-1/2}
    for (int i = 0; i < nbf; i++)
    {
        #pragma omp simd
        for (int j = 0; j < nbf; j++)
            U0_mat[i * nbf + j] *= eigval[j];
    }
    // X = (U * D^{-1/2}) * U^T
    cblas_dgemm(
        CblasRowMajor, CblasNoTrans, CblasTrans, nbf, nbf, nbf,
        1.0, U0_mat, nbf, U_mat, nbf, 0.0, X_mat, nbf
    );

    free(workbuf);
}
// Get the final J and K matrices: J = (J + J^T) / 2, K = (K + K^T) / 2
// Get the final J and K matrices by symmetrizing: M = (M + M^T) / 2.
// Intended to be called from inside an active "omp parallel" region
// (the worksharing "omp for" below binds to the enclosing team).
static void TinyDFT_finalize_JKmat(const int nbf, double *J_mat, double *K_mat, const int build_J, const int build_K)
{
    if (build_J == 1 && build_K == 1)
    {
        #pragma omp for schedule(dynamic)
        for (int r = 0; r < nbf; r++)
        {
            for (int c = r + 1; c < nbf; c++)
            {
                const int upper = r * nbf + c;
                const int lower = c * nbf + r;
                const double Jsym = (J_mat[upper] + J_mat[lower]) * 0.5;
                const double Ksym = (K_mat[upper] + K_mat[lower]) * 0.5;
                J_mat[upper] = Jsym;
                J_mat[lower] = Jsym;
                K_mat[upper] = Ksym;
                K_mat[lower] = Ksym;
            }
        }
    }

    if (build_J == 1 && build_K == 0)
    {
        #pragma omp for schedule(dynamic)
        for (int r = 0; r < nbf; r++)
        {
            for (int c = r + 1; c < nbf; c++)
            {
                const int upper = r * nbf + c;
                const int lower = c * nbf + r;
                const double Jsym = (J_mat[upper] + J_mat[lower]) * 0.5;
                J_mat[upper] = Jsym;
                J_mat[lower] = Jsym;
            }
        }
    }

    if (build_J == 0 && build_K == 1)
    {
        #pragma omp for schedule(dynamic)
        for (int r = 0; r < nbf; r++)
        {
            for (int c = r + 1; c < nbf; c++)
            {
                const int upper = r * nbf + c;
                const int lower = c * nbf + r;
                const double Ksym = (K_mat[upper] + K_mat[lower]) * 0.5;
                K_mat[upper] = Ksym;
                K_mat[lower] = Ksym;
            }
        }
    }
}
// Scatter the shell-blocked J/K matrices back into plain nbf x nbf row-major
// storage. Runs inside an active "omp parallel" region (worksharing "omp for").
static void TinyDFT_JKblkmat_to_JKmat(
    const int nshell, const int nbf, const int *shell_bf_num, const int *shell_bf_sind,
    const int *blk_mat_ptr, const double *J_blk_mat, const double *K_blk_mat,
    double *J_mat, double *K_mat, const int build_J, const int build_K
)
{
    if (build_J)
    {
        #pragma omp for
        for (int si = 0; si < nshell; si++)
        {
            for (int sj = 0; sj < nshell; sj++)
            {
                // Source block is packed; destination block lives inside the
                // full matrix at (shell_bf_sind[si], shell_bf_sind[sj])
                const int src_offset = blk_mat_ptr[si * nshell + sj];
                const int dst_offset = shell_bf_sind[si] * nbf + shell_bf_sind[sj];
                copy_matrix_block(
                    sizeof(double), shell_bf_num[si], shell_bf_num[sj],
                    J_blk_mat + src_offset, shell_bf_num[sj],
                    J_mat + dst_offset, nbf
                );
            }
        }
    }

    if (build_K)
    {
        #pragma omp for
        for (int si = 0; si < nshell; si++)
        {
            for (int sj = 0; sj < nshell; sj++)
            {
                const int src_offset = blk_mat_ptr[si * nshell + sj];
                const int dst_offset = shell_bf_sind[si] * nbf + shell_bf_sind[sj];
                copy_matrix_block(
                    sizeof(double), shell_bf_num[si], shell_bf_num[sj],
                    K_blk_mat + src_offset, shell_bf_num[sj],
                    K_mat + dst_offset, nbf
                );
            }
        }
    }
}
// Gather the plain nbf x nbf density matrix into shell-blocked (packed)
// storage. Runs inside an active "omp parallel" region (worksharing "omp for").
static void TinyDFT_Dmat_to_Dblkmat(
    const int nshell, const int nbf, const int *shell_bf_num, const int *shell_bf_sind,
    const int *blk_mat_ptr, const double *D_mat, double *D_blk_mat
)
{
    #pragma omp for
    for (int si = 0; si < nshell; si++)
    {
        for (int sj = 0; sj < nshell; sj++)
        {
            // Source block lives inside the full matrix; destination is packed
            const int src_offset = shell_bf_sind[si] * nbf + shell_bf_sind[sj];
            const int dst_offset = blk_mat_ptr[si * nshell + sj];
            copy_matrix_block(
                sizeof(double), shell_bf_num[si], shell_bf_num[sj],
                D_mat + src_offset, nbf,
                D_blk_mat + dst_offset, shell_bf_num[sj]
            );
        }
    }
}
void TinyDFT_build_JKmat(TinyDFT_p TinyDFT, const double *D_mat, double *J_mat, double *K_mat)
/* Build the Coulomb (J) and/or HF exchange (K) matrices from the density
 * matrix D using batched electron-repulsion integrals (ERIs).
 * Pass J_mat == NULL or K_mat == NULL to skip building that matrix.
 * Shell-pair screening, symmetric-uniqueness pruning, and per-thread
 * accumulation buffers follow the GTFock scheme.
 */
{
    int    nbf            = TinyDFT->nbf;
    int    nshell         = TinyDFT->nshell;
    int    num_valid_sp   = TinyDFT->num_valid_sp;
    int    max_dim        = TinyDFT->max_dim;
    int    mat_size       = TinyDFT->mat_size;
    int    max_JKacc_buf  = TinyDFT->max_JKacc_buf;
    int    *shell_bf_num  = TinyDFT->shell_bf_num;
    int    *shell_bf_sind = TinyDFT->shell_bf_sind;
    int    *valid_sp_lid  = TinyDFT->valid_sp_lid;
    int    *valid_sp_rid  = TinyDFT->valid_sp_rid;
    int    *blk_mat_ptr   = TinyDFT->blk_mat_ptr;
    int    *Mpair_flag    = TinyDFT->Mpair_flag;
    int    *Npair_flag    = TinyDFT->Npair_flag;
    double scrtol2        = TinyDFT->shell_scrtol2;
    double *sp_scrval     = TinyDFT->sp_scrval;
    double *J_blk_mat     = TinyDFT->J_blk_mat;
    double *K_blk_mat     = TinyDFT->K_blk_mat;
    double *D_blk_mat     = TinyDFT->D_blk_mat;
    double *FM_strip_buf  = TinyDFT->FM_strip_buf;
    double *FN_strip_buf  = TinyDFT->FN_strip_buf;
    Simint_p simint       = TinyDFT->simint;

    // Which matrices are requested?
    int build_J = (J_mat == NULL) ? 0 : 1;
    int build_K = (K_mat == NULL) ? 0 : 1;
    if (build_J == 0 && build_K == 0) return;
    if (build_J) memset(J_blk_mat, 0, DBL_MSIZE * mat_size);
    if (build_K) memset(K_blk_mat, 0, DBL_MSIZE * mat_size);

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();

        // Pack D into shell-blocked layout (worksharing "omp for" inside)
        TinyDFT_Dmat_to_Dblkmat(
            nshell, nbf, shell_bf_num, shell_bf_sind,
            blk_mat_ptr, D_mat, D_blk_mat
        );

        // Create ERI batching auxiliary data structures
        // Ket-side shell pair lists that needs to be computed
        ThreadKetShellpairLists_p thread_ksp_lists;
        create_ThreadKetShellpairLists(&thread_ksp_lists);
        // Simint multi_shellpair buffer for batched ERI computation
        void *thread_multi_shellpair;
        CMS_Simint_create_multi_sp(&thread_multi_shellpair);

        // Per-thread accumulation strips and shell-touch flags for K
        double *thread_FM_strip_buf = FM_strip_buf + tid * nbf * max_dim;
        double *thread_FN_strip_buf = FN_strip_buf + tid * nbf * max_dim;
        int    *thread_Mpair_flag   = Mpair_flag   + tid * nshell;
        int    *thread_Npair_flag   = Npair_flag   + tid * nshell;

        #pragma omp for schedule(dynamic)
        for (int MN = 0; MN < num_valid_sp; MN++)
        {
            int M = valid_sp_lid[MN];
            int N = valid_sp_rid[MN];
            double scrval1 = sp_scrval[M * nshell + N];

            double *J_MN_buf = TinyDFT->JKacc_buf + tid * max_JKacc_buf;
            double *J_MN     = J_blk_mat + blk_mat_ptr[M * nshell + N];
            int dimM = shell_bf_num[M], dimN = shell_bf_num[N];

            if (build_J) memset(J_MN_buf, 0, sizeof(double) * dimM * dimN);
            if (build_K)
            {
                memset(thread_FM_strip_buf, 0, sizeof(double) * nbf * max_dim);
                memset(thread_FN_strip_buf, 0, sizeof(double) * nbf * max_dim);
                memset(thread_Mpair_flag,   0, sizeof(int)    * nshell);
                memset(thread_Npair_flag,   0, sizeof(int)    * nshell);
            }

            for (int PQ = 0; PQ < num_valid_sp; PQ++)
            {
                int P = valid_sp_lid[PQ];
                int Q = valid_sp_rid[PQ];
                double scrval2 = sp_scrval[P * nshell + Q];

                // Symmetric uniqueness check, from GTFock
                if ((M > P && (M + P) % 2 == 1) ||
                    (M < P && (M + P) % 2 == 0)) continue;

                if ((M == P) && ((N > Q && (N + Q) % 2 == 1) ||
                                 (N < Q && (N + Q) % 2 == 0))) continue;

                // Shell screening
                if (fabs(scrval1 * scrval2) <= scrtol2) continue;

                // Push ket-side shell pair to corresponding list
                int ket_id = CMS_Simint_get_sp_AM_idx(simint, P, Q);
                KetShellpairList_p dst_sp_list = &thread_ksp_lists->ket_shellpair_lists[ket_id];
                add_shellpair_to_KetShellPairList(dst_sp_list, P, Q);

                // If the ket-side shell pair list we just used is full, handle it
                if (dst_sp_list->npairs == MAX_LIST_SIZE)
                {
                    double *thread_batch_eris;
                    int thread_nints;

                    // Compute batched ERIs
                    CMS_Simint_calc_shellquartet_batch(
                        simint, tid, M, N,
                        dst_sp_list->npairs,
                        dst_sp_list->P_list,
                        dst_sp_list->Q_list,
                        &thread_batch_eris, &thread_nints,
                        &thread_multi_shellpair
                    );

                    // Accumulate ERI results to global matrices
                    if (thread_nints > 0)
                    {
                        double st = get_wtime_sec();
                        acc_JKmat_with_ket_sp_list(
                            TinyDFT, tid, M, N,
                            dst_sp_list->npairs,
                            dst_sp_list->P_list,
                            dst_sp_list->Q_list,
                            thread_batch_eris, thread_nints,
                            thread_FM_strip_buf, thread_FN_strip_buf,
                            thread_Mpair_flag,   thread_Npair_flag,
                            build_J, build_K
                        );
                        double et = get_wtime_sec();
                        if (tid == 0) CMS_Simint_add_accF_timer(simint, et - st);
                    }

                    // Reset the computed ket-side shell pair list
                    dst_sp_list->npairs = 0;
                }  // End of "if (dst_sp_list->npairs == MAX_LIST_SIZE)"
            }  // End of PQ loop

            // Handles all non-empty ket-side shell pair lists
            for (int ket_id = 0; ket_id < MAX_AM_PAIRS; ket_id++)
            {
                KetShellpairList_p dst_sp_list = &thread_ksp_lists->ket_shellpair_lists[ket_id];

                if (dst_sp_list->npairs > 0)
                {
                    double *thread_batch_eris;
                    int thread_nints;

                    // Compute batched ERIs
                    CMS_Simint_calc_shellquartet_batch(
                        simint, tid, M, N,
                        dst_sp_list->npairs,
                        dst_sp_list->P_list,
                        dst_sp_list->Q_list,
                        &thread_batch_eris, &thread_nints,
                        &thread_multi_shellpair
                    );

                    // Accumulate ERI results to global matrices
                    if (thread_nints > 0)
                    {
                        double st = get_wtime_sec();
                        acc_JKmat_with_ket_sp_list(
                            TinyDFT, tid, M, N,
                            dst_sp_list->npairs,
                            dst_sp_list->P_list,
                            dst_sp_list->Q_list,
                            thread_batch_eris, thread_nints,
                            thread_FM_strip_buf, thread_FN_strip_buf,
                            thread_Mpair_flag,   thread_Npair_flag,
                            build_J, build_K
                        );
                        double et = get_wtime_sec();
                        if (tid == 0) CMS_Simint_add_accF_timer(simint, et - st);
                    }

                    // Reset the computed ket-side shell pair list
                    dst_sp_list->npairs = 0;
                }  // End of "if (dst_sp_list->npairs > 0)"
            }  // End of ket_id loop

            // Accumulate thread-local J and K results to global J and K mat
            if (build_J) atomic_add_vector(J_MN, J_MN_buf, dimM * dimN);
            if (build_K)
            {
                // Only flush the shell blocks this thread actually touched
                int FM_strip_offset = blk_mat_ptr[M * nshell];
                int FN_strip_offset = blk_mat_ptr[N * nshell];
                for (int iPQ = 0; iPQ < nshell; iPQ++)
                {
                    int dim_iPQ = shell_bf_num[iPQ];
                    if (thread_Mpair_flag[iPQ])
                    {
                        int MPQ_blk_ptr = blk_mat_ptr[M * nshell + iPQ];
                        double *K_blk_ptr = K_blk_mat + MPQ_blk_ptr;
                        double *thread_FM_strip_blk_ptr = thread_FM_strip_buf + MPQ_blk_ptr - FM_strip_offset;
                        atomic_add_vector(K_blk_ptr, thread_FM_strip_blk_ptr, dimM * dim_iPQ);
                    }
                    if (thread_Npair_flag[iPQ])
                    {
                        int NPQ_blk_ptr = blk_mat_ptr[N * nshell + iPQ];
                        double *K_blk_ptr = K_blk_mat + NPQ_blk_ptr;
                        double *thread_FN_strip_blk_ptr = thread_FN_strip_buf + NPQ_blk_ptr - FN_strip_offset;
                        atomic_add_vector(K_blk_ptr, thread_FN_strip_blk_ptr, dimN * dim_iPQ);
                    }
                }  // End of iPQ loop
            }  // End of "if (build_K)"
        }  // End of MN loop

        // Unpack blocked results and symmetrize (both use "omp for" inside)
        TinyDFT_JKblkmat_to_JKmat(
            nshell, nbf, shell_bf_num, shell_bf_sind, blk_mat_ptr,
            J_blk_mat, K_blk_mat, J_mat, K_mat, build_J, build_K
        );
        TinyDFT_finalize_JKmat(nbf, J_mat, K_mat, build_J, build_K);

        CMS_Simint_free_multi_sp(thread_multi_shellpair);
        free_ThreadKetShellpairLists(thread_ksp_lists);
    }  // End of "#pragma omp parallel"
}
// Compute Hartree-Fock energy components from the density matrix:
//   E_one_elec     = 2 * sum(D .* Hcore)
//   E_two_elec     = 2 * sum(D .* J)
//   E_HF_exchange  = -sum(D .* K)   (only written when K_mat != NULL)
void TinyDFT_calc_HF_energy(
    const int mat_size, const double *D_mat, const double *Hcore_mat, const double *J_mat,
    const double *K_mat, double *E_one_elec, double *E_two_elec, double *E_HF_exchange
)
{
    double one_elec = 0.0, two_elec = 0.0, exchange = 0.0;
    if (K_mat != NULL)
    {
        #pragma omp parallel for simd reduction(+:one_elec, two_elec, exchange)
        for (int i = 0; i < mat_size; i++)
        {
            one_elec += D_mat[i] * Hcore_mat[i];
            two_elec += D_mat[i] * J_mat[i];
            exchange += D_mat[i] * K_mat[i];
        }
        *E_one_elec    = 2.0 * one_elec;
        *E_two_elec    = 2.0 * two_elec;
        *E_HF_exchange = -exchange;
    } else {
        #pragma omp parallel for simd reduction(+:one_elec, two_elec)
        for (int i = 0; i < mat_size; i++)
        {
            one_elec += D_mat[i] * Hcore_mat[i];
            two_elec += D_mat[i] * J_mat[i];
        }
        *E_one_elec = 2.0 * one_elec;
        *E_two_elec = 2.0 * two_elec;
    }
}
|
chlpca.h | /*
#
# File : chlpca.cpp
# ( C++ source file )
#
# Description : Example of use for the CImg plugin 'plugins/chlpca.h'.
# This file is a part of the CImg Library project.
# ( http://cimg.sourceforge.net )
#
# Copyright : Jerome Boulanger
# ( http://www.irisa.fr/vista/Equipe/People/Jerome.Boulanger.html )
#
#
# License : CeCILL v2.0
# ( http://www.cecill.info/licences/Licence_CeCILL_V2-en.html )
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#
*/
// Define some useful macros.
//! Some loops
#define cimg_for_step1(bound,i,step) for (int i = 0; i<(int)(bound); i+=step)
#define cimg_for_stepX(img,x,step) cimg_for_step1((img)._width,x,step)
#define cimg_for_stepY(img,y,step) cimg_for_step1((img)._height,y,step)
#define cimg_for_stepZ(img,z,step) cimg_for_step1((img)._depth,z,step)
#define cimg_for_stepXY(img,x,y,step) cimg_for_stepY(img,y,step) cimg_for_stepX(img,x,step)
#define cimg_for_stepXYZ(img,x,y,step) cimg_for_stepZ(img,z,step) cimg_for_stepY(img,y,step) cimg_for_stepX(img,x,step)
//! Loop for point J(xj,yj) in the neighborhood of a point I(xi,yi) of size (2*rx+1,2*ry+1)
/**
Point J is kept inside the boundaries of the image img.
example of summing the pixels values in a neighborhood 11x11
cimg_forXY(img,xi,yi) cimg_forXY_window(img,xi,yi,xj,yj,5,5) dest(xi,yi) += src(xj,yj);
**/
#define cimg_forXY_window(img,xi,yi,xj,yj,rx,ry) \
for (int yi0=cimg::max(0,yi-ry), yi1=cimg::min(yi+ry,(int)img.height()-1), yj=yi0;yj<=yi1;++yj) \
for (int xi0=cimg::max(0,xi-rx), xi1=cimg::min(xi+rx,(int)img.width()-1), xj=xi0;xj<=xi1;++xj)
#define cimg_forXYZ_window(img,xi,yi,zi,xj,yj,zj,rx,ry,rz) \
for (int zi0=cimg::max(0,zi-rz), zi1=cimg::min(zi+rz,(int)img.depth()-1) , zj=zi0;zj<=zi1;++zj) \
for (int yi0=cimg::max(0,yi-ry), yi1=cimg::min(yi+ry,(int)img.height()-1), yj=yi0;yj<=yi1;++yj) \
for (int xi0=cimg::max(0,xi-rx), xi1=cimg::min(xi+rx,(int)img.width()-1) , xj=xi0;xj<=xi1;++xj)
//! Crop a patch in the image around position x,y,z and return a column vector
/**
\param x x-coordinate of the center of the patch
\param y y-coordinate of the center of the patch
\param z z-coordinate of the center of the patch
\param px the patch half width
\param py the patch half height
\param pz the patch half depth
\return img.get_crop(x0,y0,z0,x1,y1,z1).unroll('y');
**/
//! Crop the patch centred at (x,y,z) and return it unrolled as a column vector.
/**
   \param x,y,z  centre of the patch
   \param px,py,pz  patch half width / height / depth (pz ignored for 2D images)
   \return the cropped block reshaped along 'y'
**/
CImg<T> get_patch(int x, int y, int z,
int px, int py, int pz) const {
const int x0 = x - px, x1 = x + px;
const int y0 = y - py, y1 = y + py;
if (depth() != 1) {
const int z0 = z - pz, z1 = z + pz;
return get_crop(x0, y0, z0, x1, y1, z1).unroll('y');
}
return get_crop(x0, y0, x1, y1).unroll('y');
}
//! Extract a local patch dictionary around point (xi,yi,zi).
/**
   Builds a matrix whose columns are the unrolled patches of half-size
   (px,py,pz) centred on every pixel of the training window of half-size
   (wx,wy,wz) around (xi,yi,zi).
   \param idc (output) column index of the patch centred at (xi,yi,zi)
   Bug fix: the patch length `d` previously used `px` instead of `pz` in the
   depth factor, mis-sizing the dictionary for 3D images whenever px != pz
   (compare `nd` in get_chlpca(), which correctly uses pz).
**/
CImg<T> get_patch_dictionnary(const int xi, const int yi, const int zi,
const int px, const int py, const int pz,
const int wx, const int wy, const int wz,
int & idc) const {
const int
n = (2*wx+1) * (2*wy+1) * (2 * (depth()==1?0:wz) + 1), // max number of patches
d = (2*px+1) * (2*py+1) * (2 * (depth()==1?0:pz) + 1) * spectrum(); // patch length
CImg<> S(n, d);
int idx = 0;
if (depth() == 1) {
cimg_forXY_window((*this), xi, yi, xj, yj, wx, wy){
CImg<T> patch = get_patch(xj, yj, 0, px, py, 1);
cimg_forY(S,y) S(idx,y) = patch(y);
if (xj==xi && yj==yi) idc = idx;
idx++;
}
} else {
cimg_forXYZ_window((*this), xi,yi,zi,xj,yj,zj,wx,wy,wz){
CImg<T> patch = get_patch(xj, yj, zj, px, py, pz);
cimg_forY(S,y) S(idx,y) = patch(y);
if (xj==xi && yj==yi && zj==zi) idc = idx;
idx++;
}
}
// near image borders fewer than n patches are collected: keep only the used columns
S.columns(0, idx - 1);
return S;
}
//! Add a patch to the image
/**
\param x x-coordinate of the center of the patch
\param y y-coordinate of the center of the patch
\param z z-coordinate of the center of the patch
\param img the patch as a 1D column vector
\param px the patch half width
\param py the patch half height
\param pz the patch half depth
**/
//! Paste an unrolled patch (column vector) back into the image, centred at (xi,yi,zi).
// The vector is reshaped to (2*px+1, 2*py+1, depth, spectrum()) before drawing.
// NOTE(review): get_resize(...,-1) and draw_image(...,-1) rely on CImg's
// negative-argument conventions — presumably raw reshape plus additive drawing,
// since get_chlpca() later divides by a hit count; confirm against the CImg docs.
CImg<T> & add_patch(const int xi, const int yi, const int zi,
const CImg<T> & patch,
const int px, const int py, const int pz) {
const int
// corner of the destination block; the z extent collapses for 2D images
x0 = xi - px, y0 = yi - py, z0 = (depth() == 1 ? 0 : zi - pz),
// full extent of the patch along each axis
sx = 2 * px + 1, sy = 2 * py + 1, sz = (depth() == 1 ? 1 : 2 * pz +1);
draw_image(x0, y0, z0, 0, patch.get_resize(sx, sy, sz, spectrum(), -1), -1);
return (*this);
}
//! Add a constant patch to the image
/**
\param x x-coordinate of the center of the patch
\param y y-coordinate of the center of the patch
\param z z-coordinate of the center of the patch
\param value in the patch
\param px the patch half width
\param py the patch half height
\param pz the patch half depth
**/
//! Fill the patch region centred at (xi,yi,zi) with a constant value on all channels.
// NOTE(review): the trailing -1 is draw_rectangle's opacity argument — presumably
// additive here, mirroring the patch overload of add_patch() so a hit counter can
// accumulate; confirm against the CImg docs.
CImg<T> & add_patch(const int xi, const int yi, const int zi, const T value,
const int px, const int py, const int pz) {
const int
x0 = xi - px, y0 = yi - py, z0 = (depth() == 1 ? 0 : zi - pz),
x1 = xi + px, y1 = yi + py, z1 = (depth() == 1 ? 0 : zi + pz);
draw_rectangle(x0, y0, z0, 0, x1, y1, z1, spectrum()-1, value, -1);
return (*this);
}
//! CHLPCA denoising from the PhD thesis of Hu Haijuan
/**
\param px the patch half width
\param py the patch half height
\param pz the patch half depth
\param wx the training region half width
\param wy the training region half height
\param wz the training region half depth
\param nstep the subsampling of the image domain
\param nsim the number of patches used for training as a factor of the patch size
\param lambda_min the threshold on the eigen values of the PCA for dimension reduction
\param threshold the threshold on the value of the coefficients
\param pca_use_svd if true use the svd approach to perform the pca otherwise use the covariance method
\note please cite the PhD thesis of Hu Haijuan http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
//! Core CHLPCA denoiser (anisotropic sizes); parameters documented in the comment above.
CImg<T> get_chlpca(const int px, const int py, const int pz,
const int wx, const int wy, const int wz,
const int nstep, const float nsim,
const float lambda_min, const float threshold,
const float noise_std, const bool pca_use_svd) const {
const int
nd = (2*px+1) * (2*py+1) * (depth()==1?1:2*pz+1) * spectrum(), // patch length
K = nsim * nd; // number of similar patches kept for training (float->int truncation)
#ifdef DEBUG
fprintf(stderr,"chlpca: p:%dx%dx%d,w:%dx%dx%d,nd:%d,K:%d\n",
2*px+1,2*py+1,2*pz+1,2*wx+1,2*wy+1,2*wz+1,nd,K);
#endif
// noise level: estimate from the image unless the caller supplied one
float sigma;
if (noise_std < 0) sigma = std::sqrt(variance_noise());
else sigma = noise_std;
// dest accumulates denoised patches, count the number of contributions per pixel
CImg<T> dest(*this), count(*this);
dest.fill(0);
count.fill(0);
cimg_for_stepZ(*this,zi,(depth()==1||pz==0)?1:nstep){
#ifdef cimg_use_openmp
#pragma omp parallel for
#endif
cimg_for_stepXY((*this),xi,yi,nstep){
// extract the training region X
int idc = 0;
CImg<T> S = get_patch_dictionnary(xi,yi,zi,px,py,pz,wx,wy,wz,idc);
// select the K most similar patches within the training set
// (similarity = MSE against the central patch, column idc)
CImg<T> Sk(S);
CImg<unsigned int> index(S.width());
if (K < Sk.width() - 1){
CImg<T> mse(S.width());
CImg<unsigned int> perms;
cimg_forX(S,x){mse(x) = S.get_column(idc).MSE(S.get_column(x)); }
mse.sort(perms,true);
// reorder columns by increasing MSE; index() maps original column -> rank
cimg_foroff(perms,i) {
cimg_forY(S,j) Sk(i,j) = S(perms(i),j);
index(perms(i)) = i;
}
// NOTE(review): columns(0,K) is inclusive, so K+1 patches are kept
Sk.columns(0, K);
perms.threshold(K);
} else {
cimg_foroff(index,i) index(i)=i;
}
// centering the patches (subtract the mean patch M)
CImg<T> M(1, Sk.height(), 1, 1, 0);
cimg_forXY(Sk,x,y) { M(y) += Sk(x,y); }
M /= (T)Sk.width();
cimg_forXY(Sk,x,y) { Sk(x,y) -= M(y); }
// compute the principal component of the training set S
CImg<T> P, lambda;
if (pca_use_svd) {
CImg<T> V;
Sk.get_transpose().SVD(V,lambda,P,100);
} else {
// covariance route: eigenvalues of Sk*Sk^T are the squared singular values
(Sk * Sk.get_transpose()).symmetric_eigen(lambda, P);
lambda.sqrt();
}
// dimension reduction: keep components whose singular value exceeds the
// noise-derived threshold tx
int s = 0;
const T tx = std::sqrt((double)Sk.width()-1.0) * lambda_min * sigma;
while((lambda(s) > tx) && (s < ((int)lambda.size() - 1))) { s++; }
P.columns(0,s);
// project all the patches on the basis (compute scalar product)
Sk = P.get_transpose() * Sk;
// threshold the coefficients
if (threshold > 0) { Sk.threshold(threshold, 1); }
// project back to pixel space
Sk = P * Sk;
// recenter the patches
cimg_forXY(Sk,x,y) { Sk(x,y) += M(y); }
// accumulate every denoised patch that survived the similarity selection
int j = 0;
cimg_forXYZ_window((*this),xi,yi,zi,xj,yj,zj,wx,wy,wz){
const int id = index(j);
if (id < Sk.width()) {
dest.add_patch(xj, yj, zj, Sk.get_column(id), px, py, pz);
count.add_patch(xj, yj, zj, (T)1, px, py, pz);
}
j++;
}
}
}
// normalize by the number of contributions; untouched pixels keep the input value
cimg_foroff(dest, i) {
if(count(i) != 0) { dest(i) /= count(i); }
else { dest(i) = (*this)(i); }
}
return dest;
}
//! CHLPCA denoising from the PhD thesis of Hu Haijuan
/**
\param px the patch half width
\param py the patch half height
\param pz the patch half depth
\param wx the training region half width
\param wy the training region half height
\param wz the training region half depth
\param nstep the subsampling of the image domain
\param nsim the number of patches used for training as a factor of the patch size
\param lambda_min the threshold on the eigen values of the PCA for dimension reduction
\param threshold the threshold on the value of the coefficients
\param pca_use_svd if true use the svd approach to perform the pca otherwise use the covariance method
\note please cite the PhD thesis of Hu Haijuan http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
//! In-place variant of get_chlpca() (anisotropic parameter version).
CImg<T> & chlpca(const int px, const int py, const int pz,
const int wx, const int wy, const int wz,
const int nstep, const float nsim,
const float lambda_min, const float threshold,
const float noise_std, const bool pca_use_svd) {
// Denoise into a temporary, then overwrite this image with the result.
(*this) = get_chlpca(px, py, pz, wx, wy, wz,
nstep, nsim, lambda_min,
threshold, noise_std, pca_use_svd);
return (*this);
}
//! CHLPCA denoising from the PhD thesis of Hu Haijuan
/**
\param p the patch half size
\param w the training region half size
\param nstep the subsampling of the image domain
\param nsim the number of patches used for training as a factor of the patch size
\param lambda_min the threshold on the eigen values of the PCA for dimension reduction
\param threshold the threshold on the value of the coefficients
\param pca_use_svd if true use the svd approach to perform the pca otherwise use the covariance method
\note please cite the PhD thesis of Hu Haijuan http://www.univ-ubs.fr/soutenance-de-these-hu-haijuan-337653.kjsp?RH=1318498222799
**/
//! CHLPCA denoising with isotropic sizes: forwards to the anisotropic version.
/**
   \param p the patch half size (all spatial axes)
   \param w the training region half size (all spatial axes)
   Remaining parameters are passed through unchanged; the depth axis is
   disabled (half size 0) for 2D images.
**/
CImg<T> get_chlpca(const int p=3, const int w=10,
const int nstep=5, const float nsim=10,
const float lambda_min=2, const float threshold = -1,
const float noise_std=-1, const bool pca_use_svd=true) const {
const bool flat = (depth() == 1);
const int pz = flat ? 0 : p, wz = flat ? 0 : w;
return get_chlpca(p, p, pz, w, w, wz, nstep, nsim, lambda_min,
threshold, noise_std, pca_use_svd);
}
//! In-place CHLPCA denoising with isotropic parameters (see get_chlpca()).
/**
   Fix: now returns a reference to *this instead of a deep copy, matching the
   anisotropic chlpca() overload above and avoiding a needless image copy on
   return.  Callers that copied the returned value keep working.
**/
CImg<T> & chlpca(const int p=3, const int w=10,
const int nstep=5, const float nsim=10,
const float lambda_min=2, const float threshold = -1,
const float noise_std=-1, const bool pca_use_svd=true) {
(*this) = get_chlpca(p, w, nstep, nsim, lambda_min,
threshold, noise_std, pca_use_svd);
return (*this);
}
|
jacobi_parallel_omp.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdbool.h>
#include <omp.h>
#include <memory.h>
#include "matrix.h"
#include "jacobi.h"
/*
 * Jacobi iterative solver parallelized with OpenMP.
 *
 * Solves m->a * x = m->b starting from x0 = (1,...,1) using
 *   x_i^(k+1) = (b_i - sum_{j != i} a_ij * x_j^(k)) / a_ii
 * and stops when the relative update norm sqrt(n1/n2) drops to `precision`
 * (a constant declared elsewhere — presumably jacobi.h; verify), when that
 * norm becomes NaN (e.g. n2 == 0), or after 100 iterations.
 *
 * Returns a heap-allocated jacobi_result; the caller owns res and res->x.
 */
jacobi_result* jacobi_parallel_omp(matrix *m, int thread_count, bool verbose) {
int k = 0, t, termina = 0;
double norma = 0, norma_ant = 0, n1 = 0, n2 = 0;
//initialize temp arrays
// NOTE(review): malloc results are not checked before use
int line_size = m->size * sizeof(double);
double *x = malloc(line_size);
double *x0 = malloc(line_size);
//initial position
for (t = 0; t < m->size; t++) {
x0[t] = 1;
}
//main loop
// x, x0 and verbose are implicitly shared too (no default clause given)
#pragma omp parallel num_threads(thread_count) \
shared(norma, norma_ant, k, termina, n1, n2, m)
{
double soma, x2;
int i, j;
int thread_num = omp_get_thread_num();
if (verbose) {
printf("THREAD COUNT = %i, EXPECTED = %i, CURRENT=%i\n\n",
omp_get_num_threads(), thread_count, thread_num);
}
// every thread evaluates this condition; termina and k are only written
// inside the 'single' below, whose implicit barrier keeps threads in step
while (termina == 0 && k < 100) {
//sum up items for each row
//#pragma omp for reduction(+:n1, n2) private (j, soma, x2) //nowait
//# dynamic, static, guided, runtime
#pragma omp for reduction(+:n1, n2) private (j, soma, x2) schedule(static, 2)
for (i = 0; i < m->size; i++) {
//if (verbose && i < 100) printf("Thread %i processing line %i\n", thread_num, i);
soma = 0;
// m->a[i] is a sparse row: an array of item_matrix entries terminated by column < 0
item_matrix *item = m->a[i];
if (item) {
double diagonal_value = 0;
while (item->column >= 0) {
j = item->column;
if (j != i) {
soma += item->value * x0[j];
} else {
diagonal_value = item->value;
}
item++;
}
//printf("linha = %i, soma = %f\n", i, soma);
// NOTE(review): a row without a diagonal entry divides by zero here,
// and an empty row (m->a[i] == NULL) leaves x[i] uninitialized
x[i] = (m->b[i] - soma) / diagonal_value;
x2 = x[i] - x0[i];
//reduction for norma
n1 += x2 * x2;   // squared norm of the update x - x0
n2 += x[i] * x[i];   // squared norm of the new iterate x
}
}
//synchronize all threads (already has an implicit barrier after for)
//#pragma omp barrier
//only main thread
//#pragma omp master
#pragma omp single
{
//calculate current error as "norma"
norma = sqrt(n1 / n2);
if (verbose) printf("\nk = %i, norma = %.20f, norma_ant = %.6f, n1 = %.6f, n2 = %.6f \n", k, norma, norma_ant, n1, n2);
//printf("\nnorma = %.6f, norma_ant = %.6f, n1 = %.6f, n2 = %.6f \n", norma, norma_ant, n1, n2);
//reset totals
n1 = 0;
n2 = 0;
// stop once converged (after at least two iterations) or on NaN
if ((k > 1 && (norma <= precision)) || isnan(norma)) {
termina = 1;
} else {
norma_ant = norma;
memcpy(x0, x, line_size);
k++;
}
/*
printf("\nx0 = ");
for (i = 0; i < m->size; i++) {
printf("%f, ", x0[i]);
}
printf("\n");
*/
}
} //end of while, the main loop from jacobi
} //end of parallel block
//preparer results
jacobi_result* res = malloc(sizeof(jacobi_result));
res->x = malloc(line_size);
/*for (t = 0; t < m->size; t++) {
res->x[t] = x[t];
}*/
memcpy(res->x, x, line_size);
res->e = norma;
res->k = k;
//free memory
free(x);
free(x0);
return res;
}
|
atomic-14.c | /* PR middle-end/45423 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -Wno-deprecated" } */
/* { dg-skip-if "invalid in C++1z" { c++1z } } */
#ifdef __cplusplus
bool *baz ();
#else
_Bool *baz ();
#endif
int *bar ();
/* PR middle-end/45423: verify that '#pragma omp atomic' accepts pre/post
   increment and decrement applied through function-call lvalues, for both
   int and bool operands.  The bool decrement forms are guarded to C only
   (see the dg-skip-if directive above: invalid in C++1z). */
int
foo (void)
{
#pragma omp barrier
#pragma omp atomic
  (*bar ())++;
#pragma omp barrier
#pragma omp atomic
  ++(*bar ());
#pragma omp barrier
#pragma omp atomic
  (*bar ())--;
#pragma omp barrier
#pragma omp atomic
  --(*bar ());
#pragma omp barrier
#pragma omp atomic
  (*baz ())++;
#pragma omp barrier
#pragma omp atomic
  ++(*baz ());
#ifndef __cplusplus
  /* decrement of bool is only valid in C, hence the guard */
#pragma omp barrier
#pragma omp atomic
  (*baz ())--;
#pragma omp barrier
#pragma omp atomic
  --(*baz ());
#pragma omp barrier
#endif
  return 0;
}
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% John Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
/* Return the smaller of two doubles.  (When x is NaN the comparison is
   false and y is returned, matching the original ternary form.) */
static inline double MagickMin(const double x,const double y)
{
  if (x < y)
    return(x);
  return(y);
}
/* Return the larger of two doubles.  (When x is NaN the comparison is
   false and y is returned, matching the original ternary form.) */
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Map external affine arguments sx,ry,rx,sy,tx,ty (slots 0..5) to the
     internal coefficient order c0,c2,c4,c1,c3,c5; slots 0 (sx -> c0) and
     5 (ty -> c5) are already in place. */
  const double ry=affine[1], rx=affine[2], sy=affine[3], tx=affine[4];
  affine[1]=rx;  /* c1 */
  affine[2]=tx;  /* c2 */
  affine[3]=ry;  /* c3 */
  affine[4]=sy;  /* c4 */
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Inverse of AffineArgsToCoefficients(): map internal c0..c5 back to the
     external argument order sx,ry,rx,sy,tx,ty; slots 0 and 5 stay put. */
  const double c1=coeff[1], c2=coeff[2], c3=coeff[3], c4=coeff[4];
  coeff[1]=c3;  /* ry */
  coeff[2]=c1;  /* rx */
  coeff[3]=c4;  /* sy */
  coeff[4]=c2;  /* tx */
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* Invert the affine mapping u = c0*x + c1*y + c2, v = c3*x + c4*y + c5.
     From "Digital Image Warping" by George Wolberg, page 50. */
  const double rd = 1.0/(coeff[0]*coeff[4]-coeff[1]*coeff[3]); /* reciprocal determinant */
  inverse[0]=rd*coeff[4];
  inverse[1]=rd*(-coeff[1]);
  inverse[2]=rd*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[3]=rd*(-coeff[3]);
  inverse[4]=rd*coeff[0];
  inverse[5]=rd*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* Invert the eight coefficients of a perspective (projective) mapping.
     From "Digital Image Warping" by George Wolberg, page 53. */
  const double rd=1.0/(coeff[0]*coeff[4]-coeff[3]*coeff[1]); /* reciprocal determinant */
  inverse[0]=rd*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=rd*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=rd*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=rd*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=rd*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=rd*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=rd*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=rd*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
static inline double MagickRound(double x)
{
  /* Round to the nearest integer, halves away from zero; the value is
     truncated through ssize_t exactly as in the original cast chain. */
  const double biased = (x >= 0.0) ? x+0.5 : x-0.5;
  return((double) ((ssize_t) biased));
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2-dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /* Return the number of terms for a 2d polynomial of the given order.
     Valid orders are the integers 1..5, or 1.5 for the bilinear form;
     0 is returned for anything else.
     Fix: the non-integer test now uses fabs(), so orders slightly BELOW
     1.5 (e.g. 1.3) are rejected as well — previously (order-1.5) was
     compared unsigned-of-sign to MagickEpsilon and only values above
     1.5 were rejected. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Evaluate the n-th 2d polynomial basis term at (x,y).  The exponent
     tables encode the term ordering documented above: the bilinear x*y
     term sits at n==3, ahead of the pure quadratic terms, and each order
     then runs from x^N down to y^N.  Multiplication is performed x's
     first, then y's, matching the original left-to-right products. */
  static const int xpow[21] =
    { 0, 1,0, 1, 2,0, 3,2,1,0, 4,3,2,1,0, 5,4,3,2,1,0 };
  static const int ypow[21] =
    { 0, 0,1, 1, 0,2, 0,1,2,3, 0,1,2,3,4, 0,1,2,3,4,5 };
  double term;
  int i;

  if (n < 0 || n > 20)
    return( 0 ); /* should never happen */
  term=1.0;
  for (i=0; i < xpow[n]; i++)
    term*=x;
  for (i=0; i < ypow[n]; i++)
    term*=y;
  return(term);
}
static const char *poly_basis_str(ssize_t n)
{
  /* Return the symbolic form of the n-th polynomial basis term ("ii" is
     the x variable, "jj" the y variable); ordering matches poly_basis_fn(). */
  static const char *const terms[21] = {
    "",                                               /* constant */
    "*ii", "*jj",                                     /* affine    order 1   */
    "*ii*jj",                                         /* bilinear  order 1.5 */
    "*ii*ii", "*jj*jj",                               /* quadratic order 2   */
    "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj",               /* cubic   */
    "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj", "*ii*jj*jj*jj",
    "*jj*jj*jj*jj",                                                   /* quartic */
    "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
    "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj"           /* quintic */
  };
  if (n < 0 || n > 20)
    return( "UNKNOWN" ); /* should never happen */
  return(terms[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* polynomial term for x derivative */
  /* NOTE: the constant factor of the true derivative is dropped
     (e.g. n==6 is x^3, whose derivative is 3*x*x, but x*x is returned);
     presumably only the basis shape matters to the callers —
     TODO(review): confirm against the scaling at the call sites. */
  switch(n) {
    case 0: return( 0.0 ); /* constant */
    case 1: return( 1.0 );
    case 2: return( 0.0 ); /* affine order = 1 terms = 3 */
    case 3: return( y ); /* bilinear order = 1.5 terms = 4 */
    case 4: return( x );
    case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */
    case 6: return( x*x );
    case 7: return( x*y );
    case 8: return( y*y );
    case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */
    case 10: return( x*x*x );
    case 11: return( x*x*y );
    case 12: return( x*y*y );
    case 13: return( y*y*y );
    case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */
    case 15: return( x*x*x*x );
    case 16: return( x*x*x*y );
    case 17: return( x*x*y*y );
    case 18: return( x*y*y*y );
    case 19: return( y*y*y*y );
    case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */
  }
  return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* Polynomial term for the y derivative.  For n >= 6 the y derivative of
     term n equals the x derivative of term n-1 ("weird but true"); the
     quadratic terms are the exception, because the bilinear x*y term
     (n==3) is ordered before the pure quadratic ones. */
  switch(n) {
    case 0: case 1: case 4:
      return( 0.0 );
    case 2: return( 1.0 ); /* affine */
    case 3: return( x );   /* bilinear */
    case 5: return( y );   /* quadratic */
    default:
      return( poly_basis_dx(n-1,x,y) );
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usually r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
if ( number_arguments <= 1 && (number_arguments-1)%cp_size != 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid number of args: order [CPs]...");
return((double *) NULL);
}
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* not used, but provide some type of return */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
assert(! "Unknown Method Given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
assert(! "No Method Handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
%     Image *DistortResizeImage(const Image *image,const size_t columns,
%       const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"
  Image
    *resize_image,
    *tmp_image;
  RectangleInfo
    crop_area;
  double
    distort_args[12];
  VirtualPixelMethod
    vp_save;
  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Affine scaling via 3 control point pairs (u,v -> x,y):
      0,0 -> 0,0;  W,0 -> columns,0;  0,H -> 0,rows
    so the whole source maps onto the requested destination size.
  */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /*
    BUG FIX: save the caller's virtual pixel method BEFORE any change is
    made, and never modify the 'const' source image itself.  Previously the
    source image was switched to TransparentVirtualPixelMethod before
    'vp_save' was read, so the original method was lost and could never be
    restored on the result.  Only the working clone needs the transparent
    virtual pixel setting.
  */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      InheritException(exception,&image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;
      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_alpha == (Image *) NULL )
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if ( tmp_image == (Image *) NULL )
        {
          /* BUG FIX: do not leak the already-distorted alpha image */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  /* restore the caller's original virtual pixel method on the result */
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);
  /*
    Clean up the results of the Distortion: crop away the layered 'bestfit'
    canvas so the final image is exactly columns x rows with no offset.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if ( resize_image == (Image *) NULL )
    return((Image *) NULL);
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
%      Affine, Perspective, and Bilinear, do least squares fitting of the
%      distortion when more than the minimum number of control point pairs
%      are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficents, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offest to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = geometry.width/2.0;
coeff[5] = geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = 2*coeff[3]; /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = geometry.width/2.0;
coeff[5] = geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
(void) ParseAbsoluteGeometry(artifact,&geometry);
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], -coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(InterpretLocaleValue(artifact,(char **) NULL));
geometry.width *= output_scaling;
geometry.height *= output_scaling;
geometry.x *= output_scaling;
geometry.y *= output_scaling;
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireCacheView(distort_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*restrict indexes;
register ssize_t
i;
register PixelPacket
*restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]-coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
if ( weight != 0 )
weight = 1/weight;
else
weight = 1;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x;
s.y += d.y;
/* We can not determine derivatives using shepards method,
only color interpolation, not area-resampling */
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const ChannelType channel,const SparseColorMethod method,
  const size_t number_arguments,const double *arguments,
  ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);

  /* Determine number of color values needed per control point
   * (one per selected channel, in RGBKA order) */
  number_colors=0;
  if ( channel & RedChannel ) number_colors++;
  if ( channel & GreenChannel ) number_colors++;
  if ( channel & BlueChannel ) number_colors++;
  if ( channel & IndexChannel ) number_colors++;
  if ( channel & OpacityChannel ) number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortImageMethod
      distort_method;

    distort_method=(DistortImageMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods,
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
  }

  /* Verbose output: print equivalent -fx expressions where possible */
  if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      InheritException(exception,&image->exception);
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireCacheView(sparse_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      MagickPixelPacket
        pixel; /* pixel to assign to distorted image */

      register IndexPacket
        *restrict indexes;

      register ssize_t
        i;

      register PixelPacket
        *restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
      GetMagickPixelPacket(sparse_image,&pixel);
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        SetMagickPixelPacket(image,q,indexes,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* Plane fit per channel: color = a*i + b*j + c.
             * The comma operator advances x to the next coefficient set. */
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & GreenChannel )
              pixel.green = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & BlueChannel )
              pixel.blue = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & IndexChannel )
              pixel.index = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* Bilinear fit per channel: a*i + b*j + c*i*j + d */
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & GreenChannel )
              pixel.green = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & BlueChannel )
              pixel.blue = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & IndexChannel )
              pixel.index = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            if ( channel & RedChannel ) pixel.red = 0.0;
            if ( channel & GreenChannel ) pixel.green = 0.0;
            if ( channel & BlueChannel ) pixel.blue = 0.0;
            if ( channel & IndexChannel ) pixel.index = 0.0;
            if ( channel & OpacityChannel ) pixel.opacity = 0.0;
            denominator = 0.0;
            /* arguments are packed as x,y followed by number_colors values */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( method == InverseColorInterpolate )
                weight = sqrt(weight); /* inverse, not inverse squared */
              /* clamp weight so an exact control-point hit dominates
               * without dividing by zero */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ( channel & RedChannel )
                pixel.red += arguments[x++]*weight;
              if ( channel & GreenChannel )
                pixel.green += arguments[x++]*weight;
              if ( channel & BlueChannel )
                pixel.blue += arguments[x++]*weight;
              if ( channel & IndexChannel )
                pixel.index += arguments[x++]*weight;
              if ( channel & OpacityChannel )
                pixel.opacity += arguments[x++]*weight;
              denominator += weight;
            }
            if ( channel & RedChannel ) pixel.red /= denominator;
            if ( channel & GreenChannel ) pixel.green /= denominator;
            if ( channel & BlueChannel ) pixel.blue /= denominator;
            if ( channel & IndexChannel ) pixel.index /= denominator;
            if ( channel & OpacityChannel ) pixel.opacity /= denominator;
            break;
          }
          case VoronoiColorInterpolate:
          default:
          { /* Just use the closest control point you can find! */
            size_t
              k;
            double
              minimum = MagickHuge;

            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel ) pixel.red = arguments[x++];
                if ( channel & GreenChannel ) pixel.green = arguments[x++];
                if ( channel & BlueChannel ) pixel.blue = arguments[x++];
                if ( channel & IndexChannel ) pixel.index = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* color values were given as normalized values; scale them to
         * the quantum range before storing back into the image */
        if ( channel & RedChannel ) pixel.red *= QuantumRange;
        if ( channel & GreenChannel ) pixel.green *= QuantumRange;
        if ( channel & BlueChannel ) pixel.blue *= QuantumRange;
        if ( channel & IndexChannel ) pixel.index *= QuantumRange;
        if ( channel & OpacityChannel ) pixel.opacity *= QuantumRange;
        SetPixelPacket(sparse_image,&pixel,q,indexes);
        q++;
        indexes++;
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the timeval Y from X, storing X - Y in RESULT (the classic
 * GNU libc manual idiom). NOTE: normalizes *y in place as a side effect.
 * Returns 1 when the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y when x has fewer microseconds, so the final
   * microsecond difference comes out non-negative. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }

  /* Carry any excess microseconds in the difference into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-4 (25-point) 3D wave stencil benchmark.
 * Usage: ./3d25pt Nx Ny Nz [Nt]
 * Fixes over the original: Nx/Ny/Nz/Nt were read uninitialized when
 * arguments were missing (UB); roc2 was allocated twice (leaking the
 * first block); the init loops skipped index 0 and A[1] was never
 * seeded, so the stencil read uninitialized memory; tile_size and A
 * were never freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults keep the run well-defined with no arguments; the +8
   * accounts for the 4-deep halo on each side of the stencil. */
  Nx = Ny = Nz = 8 + 8;
  Nt = 2;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A holds two time planes (ping-pong buffers); roc2 is the
   * per-point coefficient grid. Allocate roc2 exactly once. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Initialize every grid point, including the halo (the original
   * started at index 1, leaving plane/row/column 0 uninitialized even
   * though the stencil reads them). A[1] is also seeded because the
   * update reads the previous value of the destination plane. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  /* Run TESTS repetitions and report the fastest (min_tdiff). */
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);          /* was leaked in the original */
  free(tile_size);  /* was leaked in the original */

  return 0;
}
|
target_data_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s
void foo() { }
int main(int argc, char **argv) {
  int a;
  /* clang -verify test: checks the clause requirement and the
   * branch-into/out-of-region restrictions of '#pragma omp target data'.
   * The 'expected-*' comments below are diagnostics directives and must
   * stay on the same line as the construct they check. */
#pragma omp target data // expected-error {{expected at least one 'map' or 'use_device_ptr' clause for '#pragma omp target data'}}
  {}
L1:
  foo();

#pragma omp target data map(a)
  {
    foo();
    goto L1; // expected-error {{use of undeclared label 'L1'}}
  }
  goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp target data map(a)
L2:
  foo();

#pragma omp target data map(a)(i) // expected-warning {{extra tokens at the end of '#pragma omp target data' are ignored}}
  {
    foo();
  }
#pragma omp target unknown // expected-warning {{extra tokens at the end of '#pragma omp target' are ignored}}
  {
    foo();
  }
  return 0;
}
|
aix_smd5_fmt_plug.c | /* AIX smd5 cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_smd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_smd5);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16 // tuned on i7 w/HT
#endif
#endif
#include "md5.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "aix-smd5"
#define FORMAT_NAME "AIX LPA {smd5} (modified crypt-md5)"
#define FORMAT_TAG "{smd5}"
#define FORMAT_TAG1 "$1$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG1_LEN (sizeof(FORMAT_TAG1)-1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: known ciphertext/plaintext pairs verified at startup. */
static struct fmt_tests smd5_tests[] = {
	/* following hashes are AIX non-standard smd5 hashes */
	{"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"},
	{"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"},
	{"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"},
	/* following hashes are AIX standard smd5 hashes (with corrected tag)
	 * lpa_options = std_hash=true */
	{"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"},
	{"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"},
	{"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"},
	{NULL}
};
/* Per-index candidate passwords and their computed 16-byte digests. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

static struct custom_salt {
	int is_standard;        /* non-zero for standard "$1$" md5-crypt hashes */
	unsigned char salt[16]; /* NUL-terminated; at most 8 salt chars used */
} *cur_salt;
/* Allocate key/hash buffers; under OpenMP, scale the crypt batch size by
 * thread count times OMP_SCALE (standard JtR OMP tuning pattern). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Syntactic check of a candidate ciphertext.
 * Accepts "{smd5}salt$hash" (AIX non-standard) and "$1$salt$hash"
 * (standard md5-crypt, as produced with lpa_options = std_hash=true).
 * Now also rejects hash segments that are not exactly 22 crypt-base64
 * characters (16 bytes encoded), which the original never checked.
 * Returns 1 for well-formed input, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0 &&
	    strncmp(ciphertext, FORMAT_TAG1, FORMAT_TAG1_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		ctcopy += FORMAT_TAG_LEN;
	else
		ctcopy += FORMAT_TAG1_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) != 8)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* hash */
		goto err;
	if (strlen(p) != 22)	/* md5-crypt hashes are 22 base64 chars */
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse the salt (and hash flavor) out of a ciphertext.
 * Returns a pointer to static storage, per JtR format convention.
 * Cleanup over the original: the redundant second 'keeptr = ctcopy;'
 * assignment and the dead 'p = strtokm(NULL, "$");' call (whose result
 * was never used) are removed. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) {
		ctcopy += FORMAT_TAG_LEN;
		cs.is_standard = 0;
	}
	else {
		ctcopy += FORMAT_TAG1_LEN;
		cs.is_standard = 1;
	}
	p = strtokm(ctcopy, "$");
	/* salt is at most 8 chars (enforced by valid()); the 16-byte buffer
	 * always fits it plus the terminating NUL */
	strncpy((char*)cs.salt, p, 9);
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode four crypt-base64 characters (6 bits each) into 24 bits and
 * scatter the three resulting bytes to positions b1/b2/b3 of out.b —
 * the permuted byte order used by the md5-crypt base64 layout. */
#define TO_BINARY(b1, b2, b3) \
	value = \
		(uint32_t)atoi64[ARCH_INDEX(pos[0])] | \
		((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \
		((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \
		((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \
	pos += 4; \
	out.b[b1] = value >> 16; \
	out.b[b2] = value >> 8; \
	out.b[b3] = value;

/* Convert the base64 hash portion of a ciphertext into its 16 raw bytes.
 * Returns a pointer to static storage. */
static void* get_binary(char *ciphertext)
{
	static union {
		char b[16];
		ARCH_WORD w; /* forces word alignment of b */
	} out;
	char *pos;
	uint32_t value;

	if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		pos = ciphertext + FORMAT_TAG_LEN;
	else
		pos = ciphertext + FORMAT_TAG1_LEN;
	/* skip over the salt to just past the '$' separator */
	while (*pos++ != '$');
	TO_BINARY(0, 6, 12);
	TO_BINARY(1, 7, 13);
	TO_BINARY(2, 8, 14);
	TO_BINARY(3, 9, 15);
	TO_BINARY(4, 10, 5);
	/* the last two characters encode the final byte (low 8 of 12 bits) */
	out.b[11] =
		(uint32_t)atoi64[ARCH_INDEX(pos[0])] |
		((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6);
	return out.b;
}
/* Partial-hash accessors: expose the low bits of the first 32 bits of
 * each computed digest, masked to the standard JtR hash-table sizes. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
* $Id: md5_crypt.c,v 1.1 2002-05-11 14:42:35 cpbotha Exp $
*
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* Origin: Id: crypt.c,v 1.3 1995/05/30 05:42:22 rgrimes Exp
*
*/
/* Modified FreeBSD crypt-md5. When is_standard is zero (AIX {smd5}
 * variant), the "$1$" magic is omitted from the initial hash input —
 * the only difference from standard md5-crypt. Unlike the original
 * crypt(), this writes the 16 raw digest bytes into 'passwd'. */
static void crypt_md5(char *pw, char *salt, int is_standard, char *passwd)
{
	char *magic = "$1$";
	/* This string is magic for this algorithm. Having
	 * it this way, we can get better later on */
	char *sp, *ep;
	unsigned char final[16];
	int sl, pl, i, j;
	MD5_CTX ctx, ctx1;

	/* Refine the Salt first */
	sp = salt;

	/* If it starts with the magic string, then skip that */
	if (!strncmp(sp, magic, strlen(magic)))
		sp += strlen(magic);

	/* It stops at the first '$', max 8 chars */
	for (ep = sp; *ep && *ep != '$' && ep < (sp + 8); ep++)
		continue;

	/* get the length of the true salt */
	sl = ep - sp;

	MD5_Init(&ctx);

	/* The password first, since that is what is most unknown */
	MD5_Update(&ctx,(unsigned char *)pw,strlen(pw));

	// The following license text applies to the "if" code block
	// License: belongs to the PUBLIC DOMAIN, donated to hashcat, credits MUST go to atom
	// (hashcat) and philsmd for their hard work. Thx
	// Disclaimer: WE PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER
	// EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
	// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
	// Furthermore, NO GUARANTEES THAT IT WORKS FOR YOU AND WORKS CORRECTLY
	if (is_standard) {
		/* Then our magic string */
		MD5_Update(&ctx,(unsigned char *)magic,strlen(magic));

		/* Then the raw salt */
		MD5_Update(&ctx,(unsigned char *)sp,sl);
	}
	else {
		/* AIX {smd5}: salt only, without the magic prefix */
		MD5_Update(&ctx,(unsigned char *)sp,sl);
	}

	/* Then just as many characters of the MD5_(pw,salt,pw) */
	MD5_Init(&ctx1);
	MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
	MD5_Update(&ctx1,(unsigned char *)sp,sl);
	MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
	MD5_Final(final,&ctx1);
	for (pl = strlen(pw); pl > 0; pl -= 16)
		MD5_Update(&ctx,(unsigned char *)final,pl>16 ? 16 : pl);

	memset(final, 0, sizeof final);

	/* Then something really weird...
	 * (j stays 0; final[0] is 0 here — faithful to the original) */
	for (j = 0, i = strlen(pw); i; i >>= 1)
		if (i & 1)
			MD5_Update(&ctx, (unsigned char *)final+j, 1);
		else
			MD5_Update(&ctx, (unsigned char *)pw+j, 1);

	/* Now make the output string */
	strcpy(passwd, magic);
	strncat(passwd, sp, sl);
	strcat(passwd, "$");

	MD5_Final(final,&ctx);

	/*
	 * and now, just to make sure things don't run too fast
	 * On a 60 Mhz Pentium this takes 34 msec, so you would
	 * need 30 seconds to build a 1000 entry dictionary...
	 */
	for (i = 0; i < 1000; i++) {
		MD5_Init(&ctx1);
		if (i & 1)
			MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
		else
			MD5_Update(&ctx1,(unsigned char *)final,16);

		if (i % 3)
			MD5_Update(&ctx1,(unsigned char *)sp,sl);

		if (i % 7)
			MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));

		if (i & 1)
			MD5_Update(&ctx1,(unsigned char *)final,16);
		else
			MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
		MD5_Final(final,&ctx1);
	}

	/* JtR wants the raw 16-byte digest; this intentionally overwrites
	 * the "$1$salt$" text prefix written above */
	memcpy(passwd, final, 16);
}
/* Compute crypt-md5 for each queued key under the current salt.
 * Parallelized per-candidate with OpenMP when available. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		crypt_md5(saved_key[index], (char*)cur_salt->salt, cur_salt->is_standard, (char *)crypt_out[index]);
	}
	return count;
}
/* Compare a candidate binary against all computed hashes.
 * Only the first ARCH_SIZE bytes are compared here (fast partial
 * check); cmp_one() performs the full BINARY_SIZE comparison.
 * The loop now runs unconditionally: in the original it existed only
 * under _OPENMP, silently relying on count always being 1 in the
 * non-OMP build (MAX_KEYS_PER_CRYPT == 1). Behavior is unchanged for
 * both configurations, but the hidden invariant is gone. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full 16-byte comparison against a single computed hash. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Nothing further to verify: cmp_one already compared the full binary. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void smd5_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate password for the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
static int salt_hash(void *salt)
{
return *(unsigned int*)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with the John the Ripper core:
 * static parameters first, then the method table. */
struct fmt_main fmt_smd5 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG, FORMAT_TAG1 },
		smd5_tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{ /* binary_hash[] */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		smd5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash[] */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
helpers.h | #ifndef POWANNIER_HELPERS_H
#define POWANNIER_HELPERS_H
#include <complex>
#include <vector>
#include "aliases.h"
namespace POWannier {
#pragma omp declare reduction \
(compadd:arma::cx_double:omp_out=omp_out+omp_in) \
initializer(omp_priv=0)
/**
* @brief
* Convenience function returning 0 for real values
* smaller than given precision.
*
* @param number
* Number to approximate.
* @param prec
* Precision to be used.
*
* @returns
* 0 if @p |number| < @p prec, \n
* @p number otherwise.
*/
double chop(double number, double prec = 1e-15);
/**
* @brief
* Convenience function returning 0 for complex
* values smaller than given precision.
*
* @param number
* Number to approximate.
* @param prec
* Precision to be used.
*
* @returns
* @p chop(re(number)) + \f$i\f$ @p chop(im(number)).
*/
std::complex<double> chop(std::complex<double> number, double prec = 1e-15);
/**
* @brief
* The space of sets of @p dim indices, with each index ranging
* from @p -cutoff to @p cutoff.
*
* @param cutoff
* The cutoff parameter.
* @param dim
* The dimension of the index set.
*
* @returns
* Vector containing all indices in the range.
*/
std::vector<NPoint> nspace(int cutoff, int dim);
/**
* @brief
* The space of sets of @p dim indices, with each index ranging
* from @p 0 to @p N-1
*
* @param N
* The range of indices (number of single index values).
* @param dim
* The dimension of the index set.
*
* @returns
* Vector containing all indices in the range.
*/
std::vector<NPoint> mspace(int N, int dim);
/**
* The position of a given index in the @ref nspace of indices.
*
* @param n
* Given index.
* @param cutoff
* Cutoff used in @ref nspace.
* @returns
* The position of index @p n in @ref nspace.
*/
int nIndex(const NPoint& n, int cutoff);
/**
* The position of a given index in the @ref mspace of indices.
* @param n
* Given index.
* @param N
* N value used in @ref mspace.
* @returns
* The position of index @p n in @ref mspace.
*/
int mIndex(const NPoint& n, int N);
/**
* @brief
* The position of a given index in the basis of indices used
* for storing Wannier function coefficients
* @param m
* Given index (of reciprocal space vectors, \f$ \vec{k} = \sum_i (2\pi
* m_i / N) \vec{i} \f$, where \f$ \vec{i} \f$ are the Cartesian coordinate
* system basis vectors).
* @param N
* System size (number of cells in each direction).
* @param band
* The band index.
* @returns
* The position of index @p m in a basis used in Wannier
* function storage.
*/
int bmIndex(const NPoint& m, int N, int band);
}
#endif |
taskwait-depend.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// taskwait with depend clause was introduced with gcc-9
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// icc does not yet support taskwait with depend clause
// XFAIL: icc
// support for taskwait with depend clause introduced in clang-14
// UNSUPPORTED: clang-5, clang-6, clang-6, clang-8, clang-9, clang-10, clang-11,
// clang-12, clang-13
#include "callback.h"
#include <omp.h>
int main() {
  int x = 0;
  /* OMPT lit test: the printed ids/addresses below are matched by the
   * FileCheck directives after main(), so output order must not change. */
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      print_ids(0);
      /* Print &x so the CHECK lines can match the dependence address. */
      printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
             &x);
/* explicit task creating an out-dependence on x */
#pragma omp task depend(out : x)
      { x++; }
      print_fuzzy_address(1);
/* taskwait with depend waits only for tasks with matching dependences */
#pragma omp taskwait depend(in: x)
      print_fuzzy_address(2);
    }
  }
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_taskwait|ompt_task_undeferred|
// CHECK-SAME: ompt_task_mergeable=1207959568, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
|
move_shallow_water_particle_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
// Pablo Becker
//
#ifndef KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
#define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
///@defgroup MoveShallowWaterParticleUtility
///@brief Utility to move particles on the eulerian mesh with an
/// explicit scheme. This is the basic tool of the pfem2 framework
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/checks.h"
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "includes/global_pointer_variables.h"
#include "processes/node_erase_process.h"
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "shallow_water_application_variables.h"
#include "shallow_water_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveShallowWaterParticleUtility
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
typedef typename Configure::ContainerType ContainerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector;
KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility);
//template<unsigned int TDim>
/// Constructor
/** Validates the settings, renumbers the elements so that Id == position+1,
 * computes nodal and elemental MEAN_SIZE, and seeds the initial set of
 * particles inside every element.
 *
 * @param rModelPart  Model part over which the particles are convected.
 * @param rParameters Settings: "convection_scalar_variable",
 *        "convection_vector_variable", "maximum_number_of_particles".
 */
MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) :
mrModelPart(rModelPart),
mScalarVar1(&KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ),
mVectorVar1(&KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) )
{
KRATOS_TRY
std::cout << "Initializing moveparticle utility for scalar transport" << std::endl;
Parameters default_parameters( R"(
{
"convection_scalar_variable" : "HEIGHT",
"convection_vector_variable" : "VELOCITY",
"maximum_number_of_particles" : 16
} )" );
// Now validate against defaults -- this also ensures no type mismatch
rParameters.ValidateAndAssignDefaults(default_parameters);
m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString();
m_vector_var1_name = rParameters["convection_vector_variable"].GetString();
// NOTE(review): the parameter is an integer (default 16) assigned to an int
// member; GetDouble() relies on an implicit conversion -- consider GetInt().
mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble();
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mLastElemId= (mrModelPart.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = static_cast<double>(rneigh.size());
for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
const double current_distance = norm_2( position_difference );
distance += current_distance / number_of_neighbours;
}
// and we store the MEAN edge length at the node (accumulated above),
// not the largest edge as a previous comment suggested
pnode->SetValue(MEAN_SIZE, distance);
node_id=pnode->GetId();
}
}
mLastNodeId=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
// NOTE(review): the elemental MEAN_SIZE stored below is actually the length
// of the SHORTEST edge of the element (minimum over all node pairs).
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double elem_size;
array_1d<double,3> Edge(3,0.0);
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
elem_size = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
elem_size += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < elem_size) elem_size = Length;
}
elem_size = sqrt(elem_size);
ielem->SetValue(MEAN_SIZE, elem_size);
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beggining
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mNElems = mrModelPart.Elements().size();
std::cout << " about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mParticlesVector.resize(mNElems*mMaxNumberOfParticles);
//and this vector contains the current number of particles that are in each element (currently zero)
mNumOfParticlesInElems.resize(mNElems);
mNumOfParticlesInElems=ZeroVector(mNElems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mNumOfParticlesInElemsAux.resize(mNElems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mVectorOfParticlePointersVectors.resize(mNElems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << " about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mMaxNumberOfParticles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
mOffset=0;
//ShallowParticle& firstparticle = mParticlesVector[0];
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mMaxNumberOfParticles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mMaxNumberOfParticles*2); j++)
// particle_pointers.push_back(&firstparticle);
mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 );
ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mNumOfParticlesInElems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
ShallowParticle& pparticle = mParticlesVector[particle_id-1];
//~ pparticle.X()=pos(j,0);
//~ pparticle.Y()=pos(j,1);
//~ pparticle.Z()=pos(j,2);
pparticle.Coordinates() = row(pos,j);
pparticle.GetEraseFlag()=false;
array_1d<float, 3 > & vector1 = pparticle.GetVector1();
float & scalar1 = pparticle.GetScalar1();
noalias(vector1) = ZeroVector(3);
scalar1=0.0;
// interpolate the initial scalar and vector values from the element nodes
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(*mScalarVar1);
noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(*mVectorVar1);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl;
mParticlePrintingToolInitialized=false;
KRATOS_CATCH("")
}
/// Destructor (no resources to release beyond the members themselves).
~MoveShallowWaterParticleUtility() = default;
/// Build the spatial search structure (bins) over the mesh elements.
/** The element list may be shuffled during the construction of the tree,
 * so the bins are built from a fresh iterator pair over the container and
 * then swapped into the member pointer.
 */
void MountBin()
{
    KRATOS_TRY
    ContainerType& r_elements = mrModelPart.ElementsArray();
    auto p_new_bins = typename BinsObjectDynamic<Configure>::Pointer(
        new BinsObjectDynamic<Configure>(r_elements.begin(), r_elements.end()));
    p_new_bins.swap(mpBinsObjectDynamic);
    std::cout << " finished mounting Bins" << std::endl;
    KRATOS_CATCH("")
}
/// Calculates the mean velocity
/** This function computes the mean velocity within an element and
* stores it in MEAN_VEL_OVER_ELEM_SIZE variable.
* This variable keeps the courant number aprox 0.1 in each substep
*
* @see MoveParticle
* @see MoveParticleInverseWay
*/
void CalculateVelOverElemSize()
{
KRATOS_TRY
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
vector_mean_velocity *= nodal_weight;
//~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
const double mean_velocity = norm_2( vector_mean_velocity );
ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
}
}
KRATOS_CATCH("")
}
/// Reset the boundary conditions
/** When a variable is fixed this function resets the nodal values
* with the previous time step
*/
void ResetBoundaryConditions()
{
KRATOS_TRY
const auto& vector_var_x = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_X"));
const auto& vector_var_y = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_Y"));
const auto& vector_var_z = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_Z"));
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(*mScalarVar1))
{
inode->FastGetSolutionStepValue(*mScalarVar1)=inode->GetSolutionStepValue(*mScalarVar1,1);
}
if (inode->IsFixed(vector_var_x))
{
inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1);
}
if (inode->IsFixed(vector_var_y))
{
inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1);
}
if (inode->IsFixed(vector_var_z))
{
inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1);
}
}
}
KRATOS_CATCH("")
}
/// Auxiliar function to compute the "delta variables"
/** Delta variables are the difference between two time steps.
* It's value is used to update particles info
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(*mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(*mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a scalar variable value to the previous time step
*/
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a vector variable value to the previous time step
*/
void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Move all the particles
/** Convects every particle along its streamline using the VELOCITY field.
 * The total DELTA_TIME is split into mMaxSubSteps substeps. The per-element
 * particle-pointer arrays are double sized: the half selected by mOffset is
 * read and the other half is written, and the roles alternate every step.
 *
 * Fixes vs. previous revision: removed an unused local N, collapsed the
 * if/else computing even_timestep, and removed a stray double semicolon.
 *
 * @see MoveParticle
 */
void MoveParticles()
{
    KRATOS_TRY
    const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    // This is the only function that reads from one half of the
    // double-sized pointer arrays (e.g. odd) and writes into the other
    // (e.g. even), so it must alternate them each call.
    const int offset = mOffset;
    const bool even_timestep = (offset == 0);
    // Offset at which the pointers are saved after moving the particles.
    const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep);
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    const unsigned int max_results = 10000;
    mMaxSubSteps = 10;
    mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps);
    std::vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    // Reset the per-element particle counters; the previous counts are kept
    // in the auxiliary vector while the particles are being moved.
    #pragma omp parallel for
    for (int kkk = 0; kkk < number_of_threads; kkk++)
    {
        for (unsigned int ii = element_partition[kkk]; ii < element_partition[kkk+1]; ii++)
        {
            mNumOfParticlesInElemsAux[ii] = mNumOfParticlesInElems[ii];
            mNumOfParticlesInElems[ii] = 0;
        }
    }
    std::cout << "convecting particles" << std::endl;
    // Move the particles across the fixed mesh, saving the updated data
    // into them (using the function MoveParticle).
    #pragma omp barrier
    #pragma omp parallel for
    for (int kkk = 0; kkk < number_of_threads; kkk++)
    {
        ResultContainerType results(max_results);
        GlobalPointersVector< Element > elements_in_trajectory;
        elements_in_trajectory.resize(20);
        for (unsigned int ielem = element_partition[kkk]; ielem < element_partition[kkk+1]; ielem++)
        {
            ModelPart::ElementsContainerType::iterator old_element = ielembegin + ielem;
            const int old_element_id = old_element->Id();
            ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1];
            if ((results.size()) != max_results)
                results.resize(max_results);
            unsigned int number_of_elements_in_trajectory = 0; // excluding the origin one (current one, ielem)
            for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++)
            {
                ShallowParticle& pparticle = old_element_particle_pointers[offset+ii];
                Element::Pointer pcurrent_element( *old_element.base() );
                ResultIteratorType result_begin = results.begin();
                bool & erase_flag = pparticle.GetEraseFlag();
                if (erase_flag == false) {
                    // no shape-function argument needed: the particle always starts at a known element
                    MoveParticle(pparticle, pcurrent_element, elements_in_trajectory, number_of_elements_in_trajectory, result_begin, max_results);
                    const int current_element_id = pcurrent_element->Id();
                    int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1];
                    if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false)
                    {
                        ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1];
                        #pragma omp critical
                        {
                            // Re-check inside the critical section: another thread
                            // may have filled the destination element meanwhile.
                            if (number_of_particles_in_current_elem < mMaxNumberOfParticles)
                            {
                                current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
                                number_of_particles_in_current_elem++ ;
                                KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) <<
                                    "In move shallow water particle utility: exceeded maximum number of particles" << std::endl;
                            }
                            else
                            {
                                pparticle.GetEraseFlag() = true; // no room left, so we just delete it
                            }
                        }
                    }
                    else
                    {
                        pparticle.GetEraseFlag() = true; // no room left, so we just delete it
                    }
                }
            }
        }
    }
    // After having moved everything, swap the active half of the arrays.
    mOffset = post_offset;
    KRATOS_CATCH("")
}
/// Transfer particles information to the mesh nodes
/** This function explicitly projects data from particles (lagrangian)
 * onto the eulerian mesh. Shape functions of the elements determine
 * the particle location within the element and its contribution to
 * each node as a weighting function. The accumulated weight per node is
 * stored in YP and used afterwards to normalize the projected values.
 */
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = mOffset;
// the array of pointers for each element has twice the required size so that
// we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles)
// We must project data from the particles (lagrangian) onto the eulerian mesh
//int nnodes = mrModelPart.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
// We save data from previous time step of the eulerian mesh in case we must reuse it later
// cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later!
// after having saved data, we reset them to zero, this way it's easier to add the contribution
// of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
// Step 1: reset the projection variables and the weight accumulator (YP)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0;
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3);
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
// Step 2: adding contribution, loop on elements, since each element has stored the particles found inside of it
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1));
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
}
int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop!
break;
ShallowParticle& pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1();
const array_1d<float,3>& particle_vector1 = pparticle.GetVector1();
// shape-function values of the particle position within this element
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
// These lines for a weighting function based on the distance (or square distance) from the node instead of the shape functions
//double sq_dist = 0;
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
nodes_added_weights[j] += weight;
nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1);
for (int k=0 ; k!=(TDim); k++) //x,y,(z)
{
nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]);
}
}
}
}
// accumulate the element contribution into the nodal database under a per-node lock
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2];
geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i];
geom[i].UnSetLock();
}
}
}
// Step 3: normalize by the accumulated weights, or fall back to the previous-step values
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1);
scalar /=sum_weights; // normalizing the scalar1
vector /=sum_weights; // normalizing the vector1
}
else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(*mScalarVar1,1); // Resetting the convected scalar
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(*mVectorVar1,1); // Resetting the convected vector
}
}
}
KRATOS_CATCH("")
}
/// Update all the particles without moving them
/** Updates every active particle using the nodal "delta variables"
 * (the difference between two time steps).
 *
 * Fix: the in-loop guard was 'iii > mMaxNumberOfParticles', which let
 * iii == mMaxNumberOfParticles index one past this half of the
 * double-sized pointer array; it is now '>=', consistent with the
 * '==' break used in TransferLagrangianToEulerian.
 *
 * @see CorrectParticleUsingDeltaVariables
 */
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY
    // The array of pointers for each element has twice the required size;
    // the half currently in use is selected by mOffset (managed only by
    // MoveParticles).
    const int offset = mOffset;
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    std::vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
    #pragma omp parallel for
    for (int kkk = 0; kkk < number_of_threads; kkk++)
    {
        for (unsigned int ii = element_partition[kkk]; ii < element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin + ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            for (int iii = 0; iii < number_of_particles_in_elem; iii++)
            {
                if (iii >= mMaxNumberOfParticles) // out of our portion of the array, abort loop!
                    break;
                ShallowParticle & pparticle = element_particle_pointers[offset+iii];
                if (pparticle.GetEraseFlag() == false)
                {
                    // 'lite' version: the geometry is passed by reference, so much cheaper
                    CorrectParticleUsingDeltaVariables(pparticle, pelement, geom);
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after moving particles and
* before transferring data from lagrangian particles to eulerian mesh
* If an element finishes with fewer particles than "minimum number
* of particles", then PreReseed adds particles inside it.
* A minimal reseed is performed in order to not disturb the projection
* from lagrangian to eulerian.
*
* @see MinimumNumberOfParticles
*
* @see MoveParticles
* @see MoveParticleInverseWay: is called to get the particle values
*/
void PreReseed(int MinimumNumberOfParticles)
{
    KRATOS_TRY
    // Adds a minimal set of particles (at the pre-reseed Gauss points) to every
    // element holding fewer than MinimumNumberOfParticles; each new particle gets
    // its values by backward integration along the velocity field
    // (MoveParticleInverseWay), so the lagrangian->eulerian projection is not disturbed.
    const int offset =mOffset;
    const int max_results = 1000;
    //tools for the paralelization
    unsigned int number_of_threads = ParallelUtilities::GetNumThreads();
    std::vector<unsigned int> elem_partition;
    int number_of_rows = mrModelPart.Elements().size();
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    //KRATOS_WATCH(elem_partition_size);
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    #pragma omp parallel firstprivate(elem_partition)
    {
        ResultContainerType results(max_results);
        int k = OpenMPUtils::ThisThread();
        //ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() + elem_partition[k];
        //ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
        //ModelPart::NodesContainerType local_list=aux[k];
        //PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
        BoundedMatrix<double, (TDim+1), 3 > pos;
        BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
        unsigned int freeparticle=0; //we start with the first position in the particles array
        //int local_id=1;
        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            results.resize(max_results);
            //const int & elem_id = ielem->Id();
            //ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
            //int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
            {
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                ComputeGaussPointPositionsForPreReseed(geom, pos, N);
                for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
                {
                    // Scan the global particle array for a free (erased) slot.
                    // The re-check inside 'omp critical' claims the slot atomically:
                    // the first cheap check only filters candidates.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                {
                                    mParticlesVector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                            freeparticle++;
                    }
                    // Build the new particle at Gauss point j and verify it lies in the element.
                    ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
                    array_1d<double,TDim+1>aux2_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
                    KRATOS_ERROR_IF_NOT( is_found ) <<
                        "In move shallow water particle utility: particle not found in domain" << std::endl;
                    pparticle.GetEraseFlag()=false;
                    ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelement( *ielem.base() );
                    // Backward integration fills the particle's scalar/vector values.
                    MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);
                    //and we copy it to the array:
                    mParticlesVector[freeparticle] = pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                    // NOTE(review): this clears the flag on the local copy only; the array
                    // copy above already has it cleared, so this line appears redundant.
                    pparticle.GetEraseFlag()=false;
                    number_of_particles_in_elem++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after the mesh stage solver is
* called and the particles are updated.
* If an element contains less particles than "minimum number of
* particles", then PostReseed adds particles inside it.
* A full reseed is performed and the particle gets it's convected
* variables directly from the eulerian mesh
*
* @param MinimumNumberOfParticles
*
* @see PreReseed
*/
void PostReseed(int MinimumNumberOfParticles) //pooyan's way
{
    KRATOS_TRY
    // Fully reseeds every element holding fewer than MinimumNumberOfParticles
    // after the mesh-stage solve: 3+2*TDim new particles are placed at the
    // post-reseed Gauss points and take their values directly from the eulerian
    // mesh (shape-function interpolation), not from backward integration.
    const int offset = mOffset;
    //TOOLS FOR THE PARALELIZATION
    unsigned int number_of_threads = ParallelUtilities::GetNumThreads();
    std::vector<unsigned int> elem_partition;
    int number_of_rows=mrModelPart.Elements().size();
    //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
    elem_partition.resize(number_of_threads + 1);
    int elem_partition_size = number_of_rows / number_of_threads;
    elem_partition[0] = 0;
    elem_partition[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
    #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
    {
        unsigned int reused_particles=0;
        unsigned int freeparticle = 0; //we start by the first position;
        int k = OpenMPUtils::ThisThread();
        BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
        BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
        double mesh_scalar1;
        array_1d<double,3> mesh_vector1;
        array_1d<int, (3+2*TDim) > positions;
        unsigned int number_of_reseeded_particles;
        for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
        {
            //const int & elem_id = ielem->Id();
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
            Geometry< Node<3> >& geom = ielem->GetGeometry();
            if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) )
            {
                //bool reseed_more=false;
                number_of_reseeded_particles = 0;
                //reseed_more=true;
                number_of_reseeded_particles = 3 + 2*TDim;
                ComputeGaussPointPositionsForPostReseed(geom, pos, N);
                for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                {
                    // Now we have to find an empty space (a particle that was about to be deleted) in the
                    // particles model part. once found. there will be our renewed particle:
                    // The re-check inside 'omp critical' claims the slot atomically.
                    bool keep_looking = true;
                    while(keep_looking)
                    {
                        if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                        {
                            #pragma omp critical
                            {
                                if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                {
                                    mParticlesVector[freeparticle].GetEraseFlag()=false;
                                    keep_looking=false;
                                }
                            }
                            if (keep_looking==false)
                                break;
                            else
                                freeparticle++;
                        }
                        else
                            freeparticle++;
                    }
                    ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
                    array_1d<double,TDim+1>aux_N;
                    bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                    KRATOS_ERROR_IF_NOT( is_found ) <<
                        "In move shallow water particle utility: particle not found in domain" << std::endl;
                    // Interpolate the mesh values at the Gauss point and copy them
                    // into the new particle.
                    mesh_scalar1 = 0.0;
                    mesh_vector1 = ZeroVector(3);
                    for (unsigned int l = 0; l < (TDim+1); l++)
                    {
                        mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(*mScalarVar1);
                        noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(*mVectorVar1);
                    }
                    pparticle.GetScalar1()=mesh_scalar1;
                    pparticle.GetVector1()=mesh_vector1;
                    pparticle.GetEraseFlag()=false;
                    mParticlesVector[freeparticle]=pparticle;
                    element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                    number_of_particles_in_elem++;
                    // NOTE(review): keep_looking is always false when the while loop above
                    // exits, so this check can never fire; it was presumably meant to detect
                    // running past the end of mParticlesVector — verify the slot search is bounded.
                    KRATOS_ERROR_IF( keep_looking ) <<
                        "In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" << std::endl;
                    reused_particles++;
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill a model part with particles
/** This function prints the particles to a model part
*
* @param rLagrangianModelPart: empty model part to print particles
* @param FilterFactor: the function will print one particle of every "filter factor"
*/
void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor )
{
    KRATOS_TRY
    // Prints one out of every "FilterFactor" particles of the complete particle
    // list into rLagrangianModelPart: each printed particle becomes a node whose
    // DISPLACEMENT stores the particle coordinates and whose scalar variable
    // stores the particle value. Unused printing nodes are parked far away.
    KRATOS_ERROR_IF( FilterFactor == 0 ) <<
        "In move shallow water particle utility: FilterFactor must be greater than zero" << std::endl;
    if (mParticlePrintingToolInitialized == false)
    {
        // BUGFIX: the original test was (NodesBegin() - NodesEnd() > 0), which is
        // never true because begin <= end, so a non-empty model part was silently
        // accepted. The intended check is that the model part has no nodes yet.
        KRATOS_ERROR_IF( rLagrangianModelPart.NodesEnd() - rLagrangianModelPart.NodesBegin() > 0 ) <<
            "In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl;
        rLagrangianModelPart.AddNodalSolutionStepVariable(*mScalarVar1);
        rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT);
        // Create enough printing nodes for the worst-case number of printed particles.
        for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++)
        {
            Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); // note: created in the NEW (lagrangian) model part
            //pnode->SetBufferSize(mrModelPart.NodesBegin()->GetBufferSize());
            pnode->SetBufferSize(1); // only the current step is needed for printing
        }
        mParticlePrintingToolInitialized=true;
    }
    // Resetting data of the unused particles
    const double inactive_particle_position = -10.0;
    array_1d<double,3>inactive_particle_position_vector;
    inactive_particle_position_vector(0)=inactive_particle_position;
    inactive_particle_position_vector(1)=inactive_particle_position;
    inactive_particle_position_vector(2)=inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin();
    for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(*mScalarVar1) = 0.0;
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }
    // Copy every FilterFactor-th active particle into the next printing node.
    int counter = 0;
    //ModelPart::NodesContainerType::iterator it_begin = rLagrangianModelPart.NodesBegin();
    for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++)
    {
        ShallowParticle& pparticle = mParticlesVector[i];
        if(pparticle.GetEraseFlag() == false && i%FilterFactor == 0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(*mScalarVar1) = pparticle.GetScalar1();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}
protected:
private:
/// Move a particle
/** this function moves a particle according to the velocity given
* by VELOCITY variable. The movement is performed in nsubsteps,
* during a total time of DELTA_TIME
*
* @param pParticle
* @param pElement
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory
* @param ResultBegin
* @param MaxNumberOfResults
*
* @see MoveParticles
*/
void MoveParticle(ShallowParticle & pParticle,
                  Element::Pointer & pElement,
                  GlobalPointersVector< Element >& rElementsInTrajectory,
                  unsigned int & rNumberOfElementsInTrajectory,
                  ResultIteratorType ResultBegin,
                  const unsigned int MaxNumberOfResults)
{
    // Integrates the particle forward along the mesh VELOCITY field for a total
    // time of DELTA_TIME, in nsubsteps explicit substeps. A particle that cannot
    // be located on the mesh is flagged for erasure. pElement is updated to the
    // element containing the final position.
    const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;
    bool keep_integrating = false;
    bool is_found;
    array_1d<double,3> vel;
    array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
    position = pParticle.Coordinates(); //initial coordinates
    double only_integral = 0.0 ;
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        keep_integrating=true;
        Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
        // Interpolate the velocity at the particle position using the shape functions.
        vel=ZeroVector(3);
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }
        //calculating substep to get +- courant(substep) = 0.1
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1; // always take at least one substep
        substep_dt = delta_t / double(nsubsteps);
        only_integral = 1.0;// weight;//*double(nsubsteps);
        position += vel*substep_dt;//weight;
        // DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
        // The trajectory cache (rElementsInTrajectory) lets later particles from
        // the same element reuse the elements discovered here.
        unsigned int check_from_element_number = 0;
        for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (keep_integrating == true)
            {
                is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                if(is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
                    vel = ZeroVector(3);
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    only_integral += 1.0; //values saved for the current time step
                    position+=vel*substep_dt;//weight;
                }
                else
                {
                    // Particle left the mesh mid-trajectory: stop integrating.
                    keep_integrating=false;
                    break;
                }
            }
            else
                break;
        }
    }
    // A particle that could not complete its trajectory is marked for erasure.
    if (keep_integrating == false) (pParticle.GetEraseFlag()=true);
    else is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)
    if (is_found == false) ( pParticle.GetEraseFlag()=true);
    pParticle.Coordinates() = position;
}
/// This function updates a particle
/** This function updates a particle variables using the "delta
* variables" from the nodal database.
*
* @param pParticle
* @param pElement
* @param rGeom
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
                                        Element::Pointer & pElement,
                                        Geometry< Node<3> >& rGeom)
{
    // Interpolates the nodal DELTA_SCALAR1 / DELTA_VECTOR1 corrections at the
    // particle position and adds them to the particle values. The geometry is
    // passed by reference (cheap 'lite' version); the particle is not moved.
    const array_1d<double,3> particle_coords = pParticle.Coordinates();
    array_1d<double,TDim+1> shape_functions;
    const bool inside = CalculatePosition(rGeom, particle_coords[0], particle_coords[1], particle_coords[2], shape_functions);
    if (inside == false)
    {
        // Particle slightly outside its element: log the shape functions and
        // clamp the negative ones to a tiny positive value before interpolating.
        KRATOS_INFO("MoveShallowWaterParticleUtility") << shape_functions << std::endl;
        for (int node = 0; node != (TDim+1); node++)
        {
            if (shape_functions[node] < 0.0)
                shape_functions[node] = 1e-10;
        }
    }
    double delta_scalar = 0.0;
    array_1d<double,3> delta_vector = ZeroVector(3);
    for (unsigned int node = 0; node < (TDim+1); node++)
    {
        delta_scalar += rGeom[node].FastGetSolutionStepValue(DELTA_SCALAR1) * shape_functions[node];
        noalias(delta_vector) += rGeom[node].FastGetSolutionStepValue(DELTA_VECTOR1) * shape_functions[node];
    }
    pParticle.GetScalar1() = pParticle.GetScalar1() + delta_scalar;
    pParticle.GetVector1() = pParticle.GetVector1() + delta_vector;
}
/// Move a particle in the inverse way
/** this function moves a particle according to the -velocity given
* by VELOCITY variable. The movement is performed by a backward
* integration in nsubsteps, during a total time of DELTA_TIME
* Before the particle goes out of the element, gets the value
* of the eulerian mesh and stores it
*
* @param pParticle
* @param pElement
* @param ResultBegin
* @param MaxNumberOfResults
*
* @see PreReseed
*/
void MoveParticleInverseWay(ShallowParticle & pParticle,
                            Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
                            ResultIteratorType ResultBegin,
                            const unsigned int MaxNumberOfResults)
{
    // Integrates the particle BACKWARD along the mesh VELOCITY field for
    // DELTA_TIME (in substeps), interpolating the mesh scalar/vector values at
    // every located position; the last successfully interpolated values are
    // stored in the particle. Used by PreReseed to initialize new particles.
    // NOTE(review): if the initial position is not found on the mesh, the
    // particle values are left untouched and no erase flag is set — confirm
    // that is the intended behavior for the caller.
    const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];
    unsigned int nsubsteps;
    double substep_dt;
    bool keep_integrating = false;
    bool is_found;
    double scalar1 = 0.0;
    array_1d<double,3> vector1;
    array_1d<double,3> vel;
    array_1d<double,3> position;
    array_1d<double,3> mid_position;
    array_1d<double,TDim+1> N;
    //we start with the first position, then it will enter the loop.
    position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
    double only_integral = 0.0 ;
    is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
    if(is_found == true)
    {
        keep_integrating = true;
        Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
        // Interpolate scalar, vector and velocity at the particle position.
        scalar1 = 0.0;
        vector1 = ZeroVector(3);
        vel = ZeroVector(3);
        for(unsigned int j=0; j<(TDim+1); j++)
        {
            scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N[j];
            noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j];
            noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
        }
        //calculating substep to get +- courant(substep) = 1/4
        nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
        if (nsubsteps<1)
            nsubsteps=1; // always take at least one substep
        substep_dt = delta_t / double(nsubsteps);
        only_integral = 1.0; // weight;//*double(nsubsteps);
        position -= vel*substep_dt; //weight; // minus: backward integration
        for(unsigned int i=0; i<(nsubsteps-1); i++) // this is for the substeps n+1. in the first one we already knew the position of the particle.
        {
            if (keep_integrating == true)
            {
                is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
                if (is_found == true)
                {
                    Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
                    scalar1 = 0.0;
                    vector1 = ZeroVector(3);
                    vel = ZeroVector(3);
                    for(unsigned int j=0; j<(TDim+1); j++)
                    {
                        // NOTE: N(j) is the same accessor as N[j] for array_1d; kept as in the original.
                        scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N(j);
                        noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j];
                        noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
                    }
                    only_integral += 1.0; //weight ; //values saved for the current time step
                    position -= vel*substep_dt; //weight;
                }
                else keep_integrating = false; // left the mesh: keep the last good values
            }
        }
        pParticle.GetScalar1() = scalar1;
        pParticle.GetVector1() = vector1;
    }
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
*
* @param position of the node
* @param N: return shape functions that define the positions within the elem
* @param pElement: return a pointer to the element
* @param ResultBegin
* @param MaxNumberOfResults
* @return FindNodeOnMesh if the element is found of not
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_1) //that was easy!
{
return true;
}
// To begin with we check the neighbour elements; it is a bit more expensive
GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement = neighb_elems[i].shared_from_this();
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if (results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();
//find local position
bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_3)
{
pElement = (*(ResultBegin + i))->shared_from_this();
return true;
}
}
}
//if nothing worked, then:
//not found case
return false;
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
* This version includes predefined elements following a trajectory
*
* @param rPosition of the node
* @param N Output shape functions that define the positions within the elem
* @param pElement Output a pointer to the element
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory Output
* @param CheckFromElementNumber
* @param ResultBegin
* @param MaxNumberOfResults
* @return FindNodeOnMesh if the element is found of not
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
GlobalPointersVector< Element >& rElementsInTrajectory,
unsigned int & rNumberOfElementsInTrajectory,
unsigned int & rCheckFromElementNumber,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
//~ const array_1d<double,3>& coords = rPosition;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
// If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
{
Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
if (is_found_2)
{
pElement = rElementsInTrajectory[i].shared_from_this();
N = aux_N;
rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid cheching twice the same element we send the counter to the following element.
return true;
}
}
// Now we check the neighbour elements:
GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement = neighb_elems[i].shared_from_this();
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found)
{
pElement = (*(ResultBegin + i))->shared_from_this();
if (rNumberOfElementsInTrajectory<20)
{
rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
rNumberOfElementsInTrajectory++;
rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
}
//not found case
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element and returns false if the particle is outside
* the element
*
* @param rGeom: the element (a triangle)
* @param xc: the position of the particle
* @param yc: the position of the particle
* @param zc: the position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double,3> & N )
{
    // Computes the barycentric coordinates of (xc, yc) with respect to the
    // triangle rGeom and returns true iff the point lies inside (or on the
    // boundary of) the triangle. zc is unused in 2D.
    const double x0 = rGeom[0].X();
    const double y0 = rGeom[0].Y();
    const double x1 = rGeom[1].X();
    const double y1 = rGeom[1].Y();
    const double x2 = rGeom[2].X();
    const double y2 = rGeom[2].Y();
    const double total_area = CalculateVol(x0, y0, x1, y1, x2, y2);
    KRATOS_ERROR_IF( total_area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
    const double one_over_area = 1.0 / total_area;
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * one_over_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * one_over_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * one_over_area;
    // Inside iff every barycentric coordinate lies in [0, 1].
    for (unsigned int i = 0; i < 3; i++)
    {
        if (N[i] < 0.0 || N[i] > 1.0)
            return false;
    }
    return true;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element and returns false if the particle is outside
* the element
*
* @param rNodesPositions of the element (a triangle)
* @param xc: the position of the particle
* @param yc: the position of the particle
* @param zc: the position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double,3> & N )
{
    // Computes the barycentric coordinates of (xc, yc) with respect to the
    // triangle whose vertex coordinates are packed as (x,y,z) triplets in
    // rNodesPositions, and returns true iff the point lies inside it. zc is unused.
    const double& x0 = rNodesPositions[0];
    const double& y0 = rNodesPositions[1];
    const double& x1 = rNodesPositions[3];
    const double& y1 = rNodesPositions[4];
    const double& x2 = rNodesPositions[6];
    const double& y2 = rNodesPositions[7];
    const double total_area = CalculateVol(x0, y0, x1, y1, x2, y2);
    KRATOS_ERROR_IF( total_area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
    const double one_over_area = 1.0 / total_area;
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * one_over_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * one_over_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * one_over_area;
    // Inside iff every barycentric coordinate lies in [0, 1].
    for (unsigned int i = 0; i < 3; i++)
    {
        if (N[i] < 0.0 || N[i] > 1.0)
            return false;
    }
    return true;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element and returns false if the particle is outside
* the element
*
* @param rGeom: the element (a tetrahedron)
* @param xc: the position of the particle
* @param yc: the position of the particle
* @param zc: the position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double, 4 > & N )
{
    // Computes the barycentric (volume) coordinates of (xc, yc, zc) with respect
    // to the tetrahedron rGeom and returns true iff the point lies inside it.
    const double x0 = rGeom[0].X();
    const double y0 = rGeom[0].Y();
    const double z0 = rGeom[0].Z();
    const double x1 = rGeom[1].X();
    const double y1 = rGeom[1].Y();
    const double z1 = rGeom[1].Z();
    const double x2 = rGeom[2].X();
    const double y2 = rGeom[2].Y();
    const double z2 = rGeom[2].Z();
    const double x3 = rGeom[3].X();
    const double y3 = rGeom[3].Y();
    const double z3 = rGeom[3].Z();
    const double total_vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF( total_vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
    const double one_over_vol = 1.0 / total_vol;
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * one_over_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * one_over_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * one_over_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * one_over_vol;
    // Inside iff every volume coordinate lies in [0, 1].
    for (unsigned int i = 0; i < 4; i++)
    {
        if (N[i] < 0.0 || N[i] > 1.0)
            return false;
    }
    return true;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define its position
* within the element and returns false if the particle is outside
* the element
*
* @param rNodesPositions of the element (a tetrahedron)
* @param xc: the position of the particle
* @param yc: the position of the particle
* @param zc: the position of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
                               const double xc,
                               const double yc,
                               const double zc,
                               array_1d<double, 4 > & N )
{
    // Computes the barycentric (volume) coordinates of (xc, yc, zc) with respect
    // to the tetrahedron whose vertex coordinates are packed as (x,y,z) triplets
    // in rNodesPositions, and returns true iff the point lies inside it.
    const double& x0 = rNodesPositions[0];
    const double& y0 = rNodesPositions[1];
    const double& z0 = rNodesPositions[2];
    const double& x1 = rNodesPositions[3];
    const double& y1 = rNodesPositions[4];
    const double& z1 = rNodesPositions[5];
    const double& x2 = rNodesPositions[6];
    const double& y2 = rNodesPositions[7];
    const double& z2 = rNodesPositions[8];
    const double& x3 = rNodesPositions[9];
    const double& y3 = rNodesPositions[10];
    const double& z3 = rNodesPositions[11];
    const double total_vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    KRATOS_ERROR_IF( total_vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
    const double one_over_vol = 1.0 / total_vol;
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * one_over_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * one_over_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * one_over_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * one_over_vol;
    // Inside iff every volume coordinate lies in [0, 1].
    for (unsigned int i = 0; i < 4; i++)
    {
        if (N[i] < 0.0 || N[i] > 1.0)
            return false;
    }
    return true;
}
/// Calculate the volume
/** This function computes the area of a triangle
*/
inline double CalculateVol( const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2 )
{
    // Signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2): half the 2D cross
    // product of the two edge vectors leaving the first vertex.
    const double edge1_x = x1 - x0;
    const double edge1_y = y1 - y0;
    const double edge2_x = x2 - x0;
    const double edge2_y = y2 - y0;
    return 0.5 * (edge1_x * edge2_y - edge1_y * edge2_x);
}
/// Calculate the volume
/** This function computes the volume of a tetrahedron
*/
inline double CalculateVol( const double x0, const double y0, const double z0,
                            const double x1, const double y1, const double z1,
                            const double x2, const double y2, const double z2,
                            const double x3, const double y3, const double z3 )
{
    // Signed volume of the tetrahedron with vertices 0..3: det(J)/6, where the
    // columns of J are the three edge vectors leaving vertex 0.
    const double ax = x1 - x0;
    const double ay = y1 - y0;
    const double az = z1 - z0;
    const double bx = x2 - x0;
    const double by = y2 - y0;
    const double bz = z2 - z0;
    const double cx = x3 - x0;
    const double cy = y3 - y0;
    const double cz = z3 - z0;
    const double determinant = ax * by * cz - ax * cy * bz
                             + ay * bz * cx - ay * bx * cz
                             + az * bx * cy - az * by * cx;
    return determinant * 0.1666666666666666666667; // det / 6
}
/// Compute 4 particle seeding positions inside a triangle.
/** Fills the first 4 rows of N with the area-coordinate weights of each
 * particle (3 corner-biased particles + the barycenter) and the first 4
 * rows of pos with the matching physical coordinates (pos = N * nodal
 * coordinates). Rows 4-6 of the 7-row matrices are left untouched here.
 * NOTE(review): despite the names, one_sixt is 0.15 (not 1/6) and
 * two_third is 0.7 (not 2/3); each row of weights still sums to 1.0 —
 * confirm the 0.15/0.7 pair is intentional.
 */
void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 7, 3 > & pos,
BoundedMatrix<double, 7, 3 > & N )
{
double one_third = 1.0 / 3.0;
double one_sixt = 0.15; // NOTE(review): named "one_sixt" but value is 0.15
double two_third = 0.7; // NOTE(review): named "two_third" but value is 0.7
// particle 0: biased towards node 2
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
// particle 1: biased towards node 0
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
// particle 2: biased towards node 1
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
// particle 3: barycenter
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
/// Compute 7 particle seeding positions on a triangle (post-reseed, 2D).
/** Fills N with the area-coordinate weights of the 7 particles (three
 * corner-dominant, the barycenter, three edge-biased) and sets
 * pos = N * nodal coordinates. Each row of weights sums to 1.0.
 * @see PostReseed
 */
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 7, 3 > & pos,
        BoundedMatrix<double, 7, 3 > & N ) //2d
{
    const double one_third = 1.0 / 3.0;
    const double weak_weight = 0.12;     // NOTE(review): original comment said 1/6, actual value 0.12
    const double dominant_weight = 0.76; // NOTE(review): original comment said 2/3, actual value 0.76
    const double edge_weight = 0.44;

    // Particles 0-2: one dominant node each (node 2, 0, 1 respectively).
    for (unsigned int i = 0; i < 3; i++)
        for (unsigned int j = 0; j < 3; j++)
            N(i, j) = (j == (i + 2) % 3) ? dominant_weight : weak_weight;
    // Particle 3: barycenter.
    for (unsigned int j = 0; j < 3; j++)
        N(3, j) = one_third;
    // Particles 4-6: one weak node each (node 0, 1, 2 respectively).
    for (unsigned int i = 4; i < 7; i++)
        for (unsigned int j = 0; j < 3; j++)
            N(i, j) = (j == i - 4) ? weak_weight : edge_weight;

    // pos = N * nodal coordinates, accumulated node by node
    // (same addition order as the explicit sums, so results are identical).
    pos = ZeroMatrix(7, 3);
    for (unsigned int i = 0; i != 3; i++) // the 3 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j = 0; j != 7; j++) // the 7 particles
            for (unsigned int k = 0; k != 3; k++) // x, y, z
                pos(j, k) += N(j, i) * coordinates[k];
    }
}
/// Compute 9 particle seeding positions on a tetrahedron (post-reseed, 3D).
/** Fills N with the volume-coordinate weights of the 9 particles (four
 * node-dominant, the barycenter, four face-biased) and sets
 * pos = N * nodal coordinates. Each row of weights sums to 1.0.
 * @see PostReseed
 */
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 9, 3 > & pos,
        BoundedMatrix<double, 9, 4 > & N ) //3D
{
    const double one_quarter = 0.25;
    const double small_fraction = 0.1; // NOTE(review): original comment said 1/6, actual value 0.1
    const double big_fraction = 0.7;
    const double mid_fraction = 0.3;

    // Particles 0-3: dominated by node i.
    for (unsigned int i = 0; i < 4; i++)
        for (unsigned int j = 0; j < 4; j++)
            N(i, j) = (i == j) ? big_fraction : small_fraction;
    // Particle 4: barycenter.
    for (unsigned int j = 0; j < 4; j++)
        N(4, j) = one_quarter;
    // Particles 5-8: far from node (i - 5), close to the opposite face.
    for (unsigned int i = 5; i < 9; i++)
        for (unsigned int j = 0; j < 4; j++)
            N(i, j) = (j == i - 5) ? small_fraction : mid_fraction;

    // pos = N * nodal coordinates, accumulated node by node.
    pos = ZeroMatrix(9, 3);
    for (unsigned int i = 0; i != 4; i++) // the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j = 0; j != 9; j++) // the 9 particles
            for (unsigned int k = 0; k != 3; k++) // x, y, z
                pos(j, k) += N(j, i) * coordinates[k];
    }
}
/// Compute 3 particle seeding positions on a triangle (pre-reseed, 2D).
/** Particle i is biased towards node i (weight 0.5) and equidistant from
 * the other two nodes (0.25 each); pos = N * nodal coordinates.
 * @see PreReseed
 */
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 3, 3 > & pos,
        BoundedMatrix<double, 3, 3 > & N ) //2D
{
    for (unsigned int i = 0; i < 3; i++)
        for (unsigned int j = 0; j < 3; j++)
            N(i, j) = (i == j) ? 0.5 : 0.25;

    // pos = N * nodal coordinates, accumulated node by node
    // (same addition order as the explicit sums, so results are identical).
    pos = ZeroMatrix(3, 3);
    for (unsigned int i = 0; i != 3; i++) // the 3 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j = 0; j != 3; j++) // the 3 particles
            for (unsigned int k = 0; k != 3; k++) // x, y, z
                pos(j, k) += N(j, i) * coordinates[k];
    }
}
/// Compute 4 particle seeding positions on a tetrahedron (pre-reseed, 3D).
/** Particle i is biased towards node i (weight 0.4) and equidistant from
 * the remaining nodes (0.2 each); pos = N * nodal coordinates.
 * @see PreReseed
 */
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 4, 3 > & pos,
        BoundedMatrix<double, 4, 4 > & N ) //3D
{
    for (unsigned int particle = 0; particle < 4; particle++)
        for (unsigned int node = 0; node < 4; node++)
            N(particle, node) = (particle == node) ? 0.4 : 0.2;

    // pos = N * nodal coordinates, accumulated node by node.
    pos = ZeroMatrix(4, 3);
    for (unsigned int node = 0; node != 4; node++) // the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[node].Coordinates();
        for (unsigned int particle = 0; particle != 4; particle++) // the 4 particles
            for (unsigned int k = 0; k != 3; k++) // x, y, z
                pos(particle, k) += N(particle, node) * coordinates[k];
    }
}
/// Compute 45 particle seeding positions on a triangle.
/** Lays the particles on a regular barycentric lattice with spacing 0.1,
 * offset 0.05 from the edges. The triangular loop produces
 * 9 + 8 + ... + 1 = 45 particles, filling every row of N and pos.
 * N holds the area coordinates, pos = N * nodal coordinates.
 */
void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 45, 3 > & pos,
BoundedMatrix<double, 45, 3 > & N )
{
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++)
{
// barycentric coordinates of this lattice point; the third one closes the sum to 1
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
counter++;
}
}
}
/// Compute 15 initial particle seeding positions on a triangle.
/** Same lattice construction as ComputeGaussPointPositions_45 but with
 * spacing 0.2: the triangular loop produces 5 + 4 + 3 + 2 + 1 = 15
 * particles. N holds the area coordinates, pos = N * nodal coordinates.
 */
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 15, 3 > & pos,
BoundedMatrix<double, 15, 3 > & N ) //2D
{
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++)
{
// barycentric coordinates of this lattice point; the third one closes the sum to 1
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
counter++;
}
}
}
/// Compute 20 initial particle seeding positions on a tetrahedron.
/** Builds a tetrahedral lattice layer by layer: the triple loop yields
 * 10 + 6 + 3 + 1 = 20 particles. The first three volume coordinates are
 * placed on a grid with spacing 0.27 (offset 0.27 * 0.175); the fourth
 * closes the sum to 1. pos = N * nodal coordinates.
 */
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
BoundedMatrix<double, 20, 3 > & pos,
BoundedMatrix<double, 20, 4 > & N ) //3D
{
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer is a triangle of 4 base X 4 height, i.e. 10 particles
{
for (unsigned int j=0; j!=(4-i);j++)
{
for (unsigned int k=0; k!=(4-i-j);k++)
{
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
//total = 1.0 - N(counter,0);
fraction_increment = 0.27; // same spacing in the remaining two lattice directions
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
// fourth volume coordinate closes the sum to exactly 1
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
counter++;
}
}
}
}
/// Sanity check: verify that all required nodal variables are registered.
/** Inspects the first node of the model part and checks that the
 * configured vector/scalar variables plus the fixed particle-transport
 * variables are present in the nodal data. Returns 0 on success; the
 * KRATOS_CHECK_* macros throw on a missing variable.
 * NOTE(review): dereferences mrModelPart.NodesBegin() unconditionally —
 * presumably the model part is never empty here; confirm with callers.
 */
virtual int Check()
{
KRATOS_TRY
Node<3>& rnode = *mrModelPart.NodesBegin();
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mVectorVar1), rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mScalarVar1), rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode)
KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode)
return 0;
KRATOS_CATCH("")
}
/// Member variables
ModelPart& mrModelPart; // model part the particles are convected on
int mNParticles; // target number of particles per element
int mNElems; // number of elements in the model part
int mOffset; // current buffer offset (presumably toggled each step together with mOddTimeStep — confirm)
int mMaxSubSteps; // substep cap for particle convection
double mMaxSubStepDt; // largest allowed substep time increment
int mMaxNumberOfParticles; // capacity of each per-element particle vector
std::vector< ShallowParticle > mParticlesVector; // storage for all particles
int mLastElemId; // highest element id seen (used when creating new entities)
bool mOddTimeStep; // parity flag for alternating-buffer bookkeeping
bool mParticlePrintingToolInitialized; // lazy-init guard for the printing tool
unsigned int mLastNodeId; // highest node id seen (used when creating new entities)
DenseVector<int> mNumOfParticlesInElems; // particle count per element
DenseVector<int> mNumOfParticlesInElemsAux; // scratch copy of the counts
DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors; // per-element particle pointers
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; // spatial search structure
const Variable<double>* mScalarVar1; // scalar variable being transported
const Variable<array_1d<double,3>>* mVectorVar1; // vector variable being transported
std::string m_scalar_var1_name; // name of the scalar variable
std::string m_vector_var1_name; // name of the vector variable
}; // class MoveShallowWaterParticleUtility
} // namespace Kratos.
#endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
|
aix_smd5_fmt_plug.c | /* AIX smd5 cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_smd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_smd5);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16 // tuned on i7 w/HT
#endif
#endif
#include "md5.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "aix-smd5"
#define FORMAT_NAME "AIX LPA {smd5} (modified crypt-md5)"
#define FORMAT_TAG "{smd5}"
#define FORMAT_TAG1 "$1$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG1_LEN (sizeof(FORMAT_TAG1)-1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: both the AIX-specific {smd5} variant and the
 * standard crypt-md5 ($1$) variant handled by this format. */
static struct fmt_tests smd5_tests[] = {
/* following hashes are AIX non-standard smd5 hashes */
{"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"},
{"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"},
{"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"},
/* following hashes are AIX standard smd5 hashes (with corrected tag)
* lpa_options = std_hash=true */
{"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"},
{"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"},
{"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int is_standard;
unsigned char salt[16];
} *cur_salt;
/* Format init: allocate the per-candidate key and digest buffers.
 * With OpenMP, the keys-per-crypt counts are first scaled by the thread
 * count and then by OMP_SCALE so every thread gets enough work. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
/* Format teardown: release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/* Syntax check for a candidate ciphertext.
 * Accepts "{smd5}salt$hash" or "$1$salt$hash" with an 8-character salt
 * and a 22-character base64 hash (16 digest bytes). Returns 1 if the
 * string is well-formed, 0 otherwise.
 * Fix: the original never validated the hash field's length, so a
 * truncated or overlong hash was accepted and only failed later in
 * get_binary(). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0 &&
	    strncmp(ciphertext, FORMAT_TAG1, FORMAT_TAG1_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		ctcopy += FORMAT_TAG_LEN;
	else
		ctcopy += FORMAT_TAG1_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) != 8)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* hash */
		goto err;
	if (strlen(p) != 22)	/* 16 bytes -> 22 base64 characters */
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse the salt (and variant flag) out of a validated ciphertext.
 * Returns a pointer to a static custom_salt; per JtR convention the
 * caller copies SALT_SIZE bytes out of it, so the static is acceptable
 * (not thread-safe, but only called from a single thread). */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
keeptr = ctcopy; /* NOTE(review): redundant, already assigned above */
if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) {
ctcopy += FORMAT_TAG_LEN;
cs.is_standard = 0; /* AIX {smd5} variant */
}
else {
ctcopy += FORMAT_TAG1_LEN;
cs.is_standard = 1; /* standard crypt-md5 variant */
}
/* salt field: valid() guarantees exactly 8 chars, so 9 bytes (incl. NUL)
 * always fit in the 16-byte cs.salt buffer */
p = strtokm(ctcopy, "$");
strncpy((char*)cs.salt, p, 9);
p = strtokm(NULL, "$"); /* hash field: parsed but unused here */
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Decode the next 4 base64 characters (6 bits each) into 3 digest bytes,
 * storing them at positions b1/b2/b3 of out.b — md5-crypt's permuted
 * output order. Advances the shared cursor `pos` by 4. */
#define TO_BINARY(b1, b2, b3) \
value = \
(ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
out.b[b1] = value >> 16; \
out.b[b2] = value >> 8; \
out.b[b3] = value;
/* Decode the 22-character base64 hash field into the 16 raw digest
 * bytes, undoing md5-crypt's byte permutation. Returns a pointer to a
 * static buffer (the union with ARCH_WORD presumably forces word
 * alignment for the fast first-word compare in cmp_all — confirm). */
static void* get_binary(char *ciphertext)
{
static union {
char b[16];
ARCH_WORD w;
} out;
char *pos;
ARCH_WORD_32 value;
if (!strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
pos = ciphertext + FORMAT_TAG_LEN;
else
pos = ciphertext + FORMAT_TAG1_LEN;
/* skip the salt field up to and including its '$' separator */
while (*pos++ != '$');
TO_BINARY(0, 6, 12);
TO_BINARY(1, 7, 13);
TO_BINARY(2, 8, 14);
TO_BINARY(3, 9, 15);
TO_BINARY(4, 10, 5);
/* last two base64 chars carry the final byte (position 11) */
out.b[11] =
(ARCH_WORD_32)atoi64[ARCH_INDEX(pos[0])] |
((ARCH_WORD_32)atoi64[ARCH_INDEX(pos[1])] << 6);
return out.b;
}
/* Hash-table bucketing functions: successively wider masks applied to the
 * first 32 bits of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt to be used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/*
* $Id: md5_crypt.c,v 1.1 2002-05-11 14:42:35 cpbotha Exp $
*
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
* Origin: Id: crypt.c,v 1.3 1995/05/30 05:42:22 rgrimes Exp
*
*/
/* Core MD5-crypt computation, extended for AIX {smd5}.
 * pw          - NUL-terminated candidate password
 * salt        - salt string, optionally prefixed with "$1$"; terminated
 *               by '$' or NUL, at most 8 characters are used
 * is_standard - non-zero: standard crypt-md5 (magic + salt absorbed);
 *               zero: AIX {smd5} (raw salt only, no magic)
 * passwd      - output buffer, receives the 16 raw digest bytes
 * NOTE(review): the "$1$salt$" prefix built into passwd below is dead
 * work — the final memcpy overwrites all 16 bytes. Kept for fidelity
 * with the original crypt(3) code. */
static void crypt_md5(char *pw, char *salt, int is_standard, char *passwd)
{
char *magic = "$1$";
/* This string is magic for this algorithm. Having
* it this way, we can get better later on */
char *sp, *ep;
unsigned char final[16];
int sl, pl, i, j;
MD5_CTX ctx, ctx1;
/* Refine the Salt first */
sp = salt;
/* If it starts with the magic string, then skip that */
if (!strncmp(sp, magic, strlen(magic)))
sp += strlen(magic);
/* It stops at the first '$', max 8 chars */
for (ep = sp; *ep && *ep != '$' && ep < (sp + 8); ep++)
continue;
/* get the length of the true salt */
sl = ep - sp;
MD5_Init(&ctx);
/* The password first, since that is what is most unknown */
MD5_Update(&ctx,(unsigned char *)pw,strlen(pw));
// The following license text applies to the "if" code block
// License: belongs to the PUBLIC DOMAIN, donated to hashcat, credits MUST go to atom
// (hashcat) and philsmd for their hard work. Thx
// Disclaimer: WE PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER
// EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// Furthermore, NO GUARANTEES THAT IT WORKS FOR YOU AND WORKS CORRECTLY
if (is_standard) {
/* Then our magic string */
MD5_Update(&ctx,(unsigned char *)magic,strlen(magic));
/* Then the raw salt */
MD5_Update(&ctx,(unsigned char *)sp,sl);
}
else {
/* AIX variant: salt only, no "$1$" magic */
MD5_Update(&ctx,(unsigned char *)sp,sl);
}
/* Then just as many characters of the MD5_(pw,salt,pw) */
MD5_Init(&ctx1);
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
MD5_Update(&ctx1,(unsigned char *)sp,sl);
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
MD5_Final(final,&ctx1);
for (pl = strlen(pw); pl > 0; pl -= 16)
MD5_Update(&ctx,(unsigned char *)final,pl>16 ? 16 : pl);
memset(final, 0, sizeof final);
/* Then something really weird...
 * (per the original crypt(3) code: j stays 0, so this absorbs either a
 * zero byte or pw[0] depending on each bit of strlen(pw)) */
for (j = 0, i = strlen(pw); i; i >>= 1)
if (i & 1)
MD5_Update(&ctx, (unsigned char *)final+j, 1);
else
MD5_Update(&ctx, (unsigned char *)pw+j, 1);
/* Now make the output string (overwritten below by the raw digest) */
strcpy(passwd, magic);
strncat(passwd, sp, sl);
strcat(passwd, "$");
MD5_Final(final,&ctx);
/*
* and now, just to make sure things don't run too fast
* On a 60 Mhz Pentium this takes 34 msec, so you would
* need 30 seconds to build a 1000 entry dictionary...
*/
for (i = 0; i < 1000; i++) {
MD5_Init(&ctx1);
if (i & 1)
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
else
MD5_Update(&ctx1,(unsigned char *)final,16);
if (i % 3)
MD5_Update(&ctx1,(unsigned char *)sp,sl);
if (i % 7)
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
if (i & 1)
MD5_Update(&ctx1,(unsigned char *)final,16);
else
MD5_Update(&ctx1,(unsigned char *)pw,strlen(pw));
MD5_Final(final,&ctx1);
}
/* raw 16-byte digest is the binary we compare against */
memcpy(passwd, final, 16);
}
/* Compute the digest for every queued candidate against cur_salt.
 * Fix: the original kept the for-loop inside "#ifdef _OPENMP", so a
 * non-OpenMP build executed the body exactly once — correct only
 * because MAX_KEYS_PER_CRYPT happens to be 1 without OMP scaling.
 * Hoisting the loop makes the function correct for any count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		crypt_md5(saved_key[index], (char*)cur_salt->salt,
		          cur_salt->is_standard, (char *)crypt_out[index]);
	}
	return count;
}
/* Quick screen: compare the first machine word of the binary against
 * every computed digest; a full compare follows in cmp_one().
 * Fix: the original loop was guarded by "#ifdef _OPENMP", so non-OpenMP
 * builds only ever checked index 0 — correct only because count was 1
 * without OMP scaling. The loop is now unconditional. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full 16-byte comparison of the binary against one candidate's digest. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* Nothing left to verify: cmp_one() already compared all BINARY_SIZE
 * bytes of the digest, so any match there is exact. */
static int cmp_exact(char *source, int index)
{
return 1;
}
static void smd5_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the candidate password stored at this index. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Bucket salts by their first word (the first 4 salt characters). */
static int salt_hash(void *salt)
{
return *(unsigned int*)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor wiring this plugin into the JtR core:
 * first the static parameters, then the method table. */
struct fmt_main fmt_smd5 = {
{
/* fmt_params */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG, FORMAT_TAG1 },
smd5_tests
}, {
/* fmt_methods */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
/* binary_hash[]: defaults operate on the raw binary */
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
smd5_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
/* get_hash[]: masks over the first word of crypt_out */
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
scaling_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_SCALING_SOLVER_H_INCLUDED )
#define KRATOS_SCALING_SOLVER_H_INCLUDED
// System includes
#include <cmath>
#include <complex>
// External includes
// Project includes
#include "includes/define.h"
#include "factories/linear_solver_factory.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ScalingSolver
* @ingroup KratosCore
* @brief This solvers rescales in order to improve the conditioning of the system
* @details Rescales the matrix, and uses a given linear solver
* @author Riccardo Rossi
* @tparam TSparseSpaceType The sparse space definition
* @tparam TDenseSpaceType The dense space definition
* @tparam TReordererType The reorder considered
*/
template<class TSparseSpaceType, class TDenseSpaceType,
class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> >
class ScalingSolver
: public LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ScalingSolver
KRATOS_CLASS_POINTER_DEFINITION(ScalingSolver);
/// Definition of the base type
typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> BaseType;
/// The definition of the spaces (sparse matrix)
typedef typename TSparseSpaceType::MatrixType SparseMatrixType;
/// The definition of the spaces (vector)
typedef typename TSparseSpaceType::VectorType VectorType;
/// The definition of the spaces (dense matrix)
typedef typename TDenseSpaceType::MatrixType DenseMatrixType;
/// The definition of the linear solver factory type
typedef LinearSolverFactory<TSparseSpaceType,TDenseSpaceType> LinearSolverFactoryType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
// NOTE(review): leaves mpLinearSolver unset (null pointer); calling
// Solve() on a default-constructed instance would dereference it —
// presumably this constructor exists only for registration/serialization.
ScalingSolver()
{
}
/**
* @brief Constructor wrapping an existing solver
* @param pLinearSolver The linear solver whose system will be scaled
* @param SymmetricScaling If the scaling is symmetric, i.e. D^-1/2 A D^-1/2 (true by default; unsymmetric scaling is not implemented in Solve)
*/
ScalingSolver(
typename BaseType::Pointer pLinearSolver,
const bool SymmetricScaling = true
) : BaseType (),
mpLinearSolver(pLinearSolver),
mSymmetricScaling(SymmetricScaling)
{
}
/**
* @brief Constructor with parameters
* @param ThisParameters The configuration parameters of the linear solver;
* must contain "solver_type" (forwarded to the LinearSolverFactory);
* "symmetric_scaling" is optional and defaults to true
*/
ScalingSolver(Parameters ThisParameters)
: BaseType ()
{
KRATOS_TRY
KRATOS_ERROR_IF_NOT(ThisParameters.Has("solver_type")) << "Solver_type must be specified to construct the ScalingSolver" << std::endl;
mpLinearSolver = LinearSolverFactoryType().Create(ThisParameters);
mSymmetricScaling = ThisParameters.Has("symmetric_scaling") ? ThisParameters["symmetric_scaling"].GetBool() : true;
KRATOS_CATCH("")
}
/// Copy constructor.
ScalingSolver(const ScalingSolver& Other) : BaseType(Other) {}
/// Destructor.
~ScalingSolver() override {}
///@}
///@name Operators
///@{
/// Assignment operator (shallow: the wrapped solver pointer is shared).
// Fix: the original only assigned the base class, leaving the target's
// mpLinearSolver and mSymmetricScaling untouched — assignment silently
// kept the old solver. Assign both members explicitly.
ScalingSolver& operator=(const ScalingSolver& Other)
{
    BaseType::operator=(Other);
    mpLinearSolver = Other.mpLinearSolver;
    mSymmetricScaling = Other.mSymmetricScaling;
    return *this;
}
///@}
///@name Operations
///@{
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function tells if the solver requires such data
*/
bool AdditionalPhysicalDataIsNeeded() override
{
// delegate the decision to the wrapped solver
return mpLinearSolver->AdditionalPhysicalDataIsNeeded();
}
/** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example
* when solving a mixed u-p problem, it is important to identify the row associated to v and p.
* another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers
* which require knowledge on the spatial position of the nodes associated to a given dof.
* This function is the place to eventually provide such data
*/
void ProvideAdditionalData(
SparseMatrixType& rA,
VectorType& rX,
VectorType& rB,
typename ModelPart::DofsArrayType& rdof_set,
ModelPart& r_model_part
) override
{
// forward everything unchanged to the wrapped solver
mpLinearSolver->ProvideAdditionalData(rA,rX,rB,rdof_set,r_model_part);
}
/** Called at the beginning of the solve step; delegated to the wrapped solver.
@param rA System matrix
@param rX Solution vector (also the initial guess for iterative solvers)
@param rB Right hand side vector
*/
void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
mpLinearSolver->InitializeSolutionStep(rA,rX,rB);
}
/** This function is designed to be called at the end of the solve step.
* for example this is the place to remove any data that we do not want to save for later
@param rA. System matrix
@param rX. Solution vector. it's also the initial guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
// delegated to the wrapped solver
mpLinearSolver->FinalizeSolutionStep(rA,rX,rB);
}
/** This function is designed to clean up all internal data in the solver.
* Clear is designed to leave the solver object as if newly created.
* After a clear a new Initialize is needed
*/
void Clear() override
{
// clear only the wrapped solver; this class holds no other solve-state
mpLinearSolver->Clear();
}
/** Normal solve method.
Solves the linear system Ax=b and puts the result on SystemVector& rX.
rX is also th initial guess for iterative methods.
@param rA. System matrix
@param rX. Solution vector. it's also the initial
guess for iterative linear solvers.
@param rB. Right hand side vector.
*/
/** Solve Ax = b after symmetrically rescaling the system.
 * Computes per-row weights w_i from rA, replaces A by D^-1/2 A D^-1/2 and
 * b by D^-1/2 b (with D = diag(w)), solves with the wrapped solver, then
 * back-scales the solution x = D^-1/2 y.
 * WARNING: rA and rB are modified IN PLACE; callers must not reuse them
 * as the unscaled system afterwards.
 * NOTE(review): a zero scaling weight (e.g. an empty row) leads to a
 * division by zero below — presumably excluded upstream; confirm.
 * @return whatever the wrapped solver's Solve returned
 */
bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override
{
if(this->IsNotConsistent(rA, rX, rB))
return false;
VectorType scaling_vector(rX.size());
//obtain the scaling matrix
GetScalingWeights(rA,scaling_vector);
//scale system
if(mSymmetricScaling == false)
{
// unsymmetric scaling was never implemented
KRATOS_THROW_ERROR(std::logic_error,"not yet implemented","")
}
else
{
#pragma omp parallel for
for(int i=0; i< static_cast<int>(scaling_vector.size()); i++)
scaling_vector[i] = sqrt(std::abs(scaling_vector[i]));
SymmetricScaling(rA,scaling_vector);
}
//scale RHS
#pragma omp parallel for
for(int i=0; i< static_cast<int>(scaling_vector.size()); i++)
rB[i] /= scaling_vector[i];
//solve the problem
bool is_solved = mpLinearSolver->Solve(rA,rX,rB);
//backscale the solution
if(mSymmetricScaling == true)
{
#pragma omp parallel for
for(int i=0; i< static_cast<int>(scaling_vector.size()); i++)
rX[i] /= scaling_vector[i];
}
return is_solved;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    // Same text as before, built by plain concatenation instead of a stringstream.
    return "Composite Linear Solver. Uses internally the following linear solver " + mpLinearSolver->Info();
}
/// Print information about this object.
/// Print information about this object (the Info() string).
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data (delegates to the base linear solver).
void PrintData(std::ostream& rOStream) const override
{
BaseType::PrintData(rOStream);
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpLinearSolver; // wrapped solver that solves the scaled system
bool mSymmetricScaling; // true: scale as D^-1/2 A D^-1/2; false is not implemented in Solve()
///@}
///@name Private Operators
///@{
/** Scale the CSR matrix A in place: A_ij /= aux_i * aux_j.
 * Rows are divided into one contiguous chunk per thread; each thread gets
 * raw iterators into A's CSR arrays (row pointers, column indices,
 * values) positioned at the start of its chunk, and perform_matrix_scaling
 * does the per-row work. The iterator arithmetic assumes the uBLAS
 * compressed_matrix CSR layout. */
static void SymmetricScaling( SparseMatrixType& A, const VectorType& aux)
{
//typedef unsigned int size_type;
//typedef double value_type;
//create partition
OpenMPUtils::PartitionVector partition;
int number_of_threads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);
//parallel loop
#pragma omp parallel
{
int thread_id = OpenMPUtils::ThisThread();
int number_of_rows = partition[thread_id+1] - partition[thread_id];
// row-pointer iterator at the first row of this thread's chunk
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
// column-index and value iterators at that row's first nonzero
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin = A.value_data().begin()+*row_iter_begin;
perform_matrix_scaling( number_of_rows,
row_iter_begin,
index_2_begin,
value_begin,
partition[thread_id],
aux
);
}
}
/**
* calculates partial product resetting to Zero the output before
*/
/**
 * Per-thread worker for SymmetricScaling: walks number_of_rows rows of
 * the CSR arrays and divides each nonzero A_ij by weights_i * weights_j.
 * @param number_of_rows rows in this thread's chunk
 * @param row_begin CSR row-pointer iterator at the chunk's first row
 * @param index2_begin column-index iterator at the chunk's first nonzero
 * @param value_begin value iterator at the chunk's first nonzero
 * @param output_begin_index global index of the chunk's first row (into weights)
 * @param weights per-row/column scaling weights (sqrt of diagonal magnitudes)
 */
static void perform_matrix_scaling(
int number_of_rows,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index2_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin,
unsigned int output_begin_index,
const VectorType& weights
)
{
int row_size;
typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
int kkk = output_begin_index;
for(int k = 0; k < number_of_rows; k++)
{
// number of nonzeros in this row from consecutive row pointers
row_size= *(row_it+1)-*row_it;
row_it++;
const typename TDenseSpaceType::DataType row_weight = weights[kkk++];
for(int i = 0; i<row_size; i++)
{
// A_ij <- A_ij / (w_i * w_j), i.e. D^-1/2 A D^-1/2
const typename TDenseSpaceType::DataType col_weight = weights[*index2_begin];
typename TDenseSpaceType::DataType t = (*value_begin);
t /= (row_weight*col_weight);
(*value_begin) = t; //check if this is correcct!!
value_begin++;
index2_begin++;
}
}
}
/// Computes the per-row scaling weights for SymmetricScaling:
/// aux[i] is filled with the Euclidean (2-)norm of row i of A.
/// Rows are split into contiguous partitions, one per OpenMP thread.
static void GetScalingWeights( const SparseMatrixType& A, VectorType& aux)
{
//typedef unsigned int size_type;
//typedef double value_type;
//create partition
OpenMPUtils::PartitionVector partition;
int number_of_threads = OpenMPUtils::GetNumThreads();
OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);
//parallel loop
#pragma omp parallel
{
int thread_id = OpenMPUtils::ThisThread();
int number_of_rows = partition[thread_id+1] - partition[thread_id];
// const iterators positioned at this thread's first row (CSR layout)
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin = A.value_data().begin()+*row_iter_begin;
GS2weights( number_of_rows,
row_iter_begin,
index_2_begin,
value_begin,
partition[thread_id],
aux
);
}
}
/**
 * Per-thread worker of GetScalingWeights: for each row in the given CSR
 * row range, writes the 2-norm of the row into weights.
 * @param number_of_rows     number of consecutive rows handled by this call
 * @param row_begin          iterator into the CSR row-offset array for the first row
 * @param index2_begin       iterator to the column indices (unused here but kept
 *                           for interface symmetry with perform_matrix_scaling)
 * @param value_begin        iterator to the values of the first row's nonzeros
 * @param output_begin_index global index of the first row (offset into weights)
 * @param weights            output vector of per-row norms
 */
static void GS2weights(
int number_of_rows,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index2_begin,
typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin,
unsigned int output_begin_index,
VectorType& weights
)
{
int row_size;
typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
int kkk = output_begin_index;
for(int k = 0; k < number_of_rows; k++)
{
// number of nonzeros in this row = difference of adjacent row offsets
row_size= *(row_it+1)-*row_it;
row_it++;
double t = 0.0;
for(int i = 0; i<row_size; i++)
{
// accumulate squared magnitudes of the row's nonzeros
double tmp = std::abs(*value_begin);
t += tmp*tmp;
value_begin++;
}
t = sqrt(t);
weights[kkk++] = t;
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class ScalingSolver
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function.
/// Fix: the template previously declared an extra TPreconditionerType
/// parameter that appears nowhere in the signature; being non-deducible
/// and defaultless, it made this operator impossible to instantiate.
template<class TSparseSpaceType, class TDenseSpaceType,
class TReordererType>
inline std::istream& operator >> (std::istream& IStream,
ScalingSolver<TSparseSpaceType, TDenseSpaceType,
TReordererType>& rThis)
{
// ScalingSolver has no streamable state; the stream is returned unchanged.
return IStream;
}
/// output stream function.
/// Fix: the template previously declared an extra TPreconditionerType
/// parameter that appears nowhere in the signature; being non-deducible
/// and defaultless, it made this operator impossible to instantiate.
template<class TSparseSpaceType, class TDenseSpaceType,
class TReordererType>
inline std::ostream& operator << (std::ostream& OStream,
const ScalingSolver<TSparseSpaceType, TDenseSpaceType,
TReordererType>& rThis)
{
rThis.PrintInfo(OStream);
OStream << std::endl;
rThis.PrintData(OStream);
return OStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_SCALING_SOLVER_H_INCLUDED defined
|
GB_realloc_memory.c | //------------------------------------------------------------------------------
// GB_realloc_memory: wrapper for realloc
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// A wrapper for REALLOC
// If p is non-NULL on input, it points to a previously allocated object of
// size nitems_old * size_of_item. The object is reallocated to be of size
// nitems_new * size_of_item. If p is NULL on input, then a new object of that
// size is allocated. On success, a pointer to the new object is returned, and
// ok is returned as true. If the allocation fails, ok is set to false and a
// pointer to the old (unmodified) object is returned.
// Usage:
//
// p = GB_realloc_memory (nnew, nold, size, p, &ok)
// if (ok)
// p points to a space of size at least nnew*size, and the first
// part, of size min(nnew,nold)*size, has the same content as
// the old memory space if it was present.
// else
// p points to the old space of size nold*size, which is left
// unchanged. This case never occurs if nnew < nold.
// By default, REALLOC is defined in GB.h as realloc. For a MATLAB
// mexFunction, it is mxRealloc. It can also be defined at compile time with
// -DREALLOC=myreallocfunc.
#include "GB.h"
void *GB_realloc_memory // pointer to reallocated block of memory, or
// to original block if the reallocation failed.
(
size_t nitems_new, // new number of items in the object
size_t nitems_old, // old number of items in the object
size_t size_of_item, // sizeof each item
void *p, // old object to reallocate
bool *ok // true if successful, false otherwise
)
{
size_t size ;
// nmalloc is only consumed by the PRINT_MALLOC diagnostic below
int nmalloc ;
// make sure at least one item is allocated
nitems_old = IMAX (1, nitems_old) ;
nitems_new = IMAX (1, nitems_new) ;
// make sure at least one byte is allocated
size_of_item = IMAX (1, size_of_item) ;
// size = nitems_new * size_of_item, with overflow detection
(*ok) = GB_size_t_multiply (&size, nitems_new, size_of_item) ;
if (!(*ok) || nitems_new > GB_INDEX_MAX || size_of_item > GB_INDEX_MAX)
{
// overflow
(*ok) = false ;
}
else if (p == NULL)
{
// a fresh object is being allocated
GB_MALLOC_MEMORY (p, nitems_new, size_of_item) ;
(*ok) = (p != NULL) ;
}
else if (nitems_old == nitems_new)
{
// the object does not change; do nothing
(*ok) = true ;
}
else
{
// change the size of the object from nitems_old to nitems_new
void *pnew ;
// check the malloc debug status. This debug flag is set outside
// of GraphBLAS and not modified, so it is safe to check it outside
// a critical section.
bool pretend_to_fail = false ;
if (GB_Global.malloc_debug)
{
// brutal malloc debug; pretend to fail if the count <= 0
#pragma omp critical (GB_memory)
{
pretend_to_fail = (GB_Global.malloc_debug_count-- <= 0) ;
}
}
if (pretend_to_fail)
{
// brutal malloc debug; pretend to fail if the count <= 0,
pnew = NULL ;
}
else
{
// REALLOC leaves the original block untouched when it fails
pnew = (void *) REALLOC (p, size) ;
}
// update the global memory-usage statistics under the same lock
#pragma omp critical (GB_memory)
{
if (pnew == NULL)
{
if (nitems_new < nitems_old)
{
// the attempt to reduce the size of the block failed, but
// the old block is unchanged. So pretend to succeed.
(*ok) = true ;
GB_Global.inuse -= (nitems_old - nitems_new) * size_of_item;
}
else
{
// out of memory
(*ok) = false ;
}
}
else
{
// success
p = pnew ;
(*ok) = true ;
GB_Global.inuse += (nitems_new - nitems_old) * size_of_item ;
GB_Global.maxused = IMAX (GB_Global.maxused, GB_Global.inuse) ;
}
nmalloc = GB_Global.nmalloc ;
}
#ifdef PRINT_MALLOC
printf ("realloc: %14p %3d %1d n "GBd" -> "GBd" size "GBd"\n",
pnew, nmalloc, GB_Global.malloc_debug, (int64_t) nitems_old,
(int64_t) nitems_new, (int64_t) size_of_item) ;
#endif
}
// on failure p is still the caller's original (valid) pointer
return (p) ;
}
|
pzgbtrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) ((plasma_complex64_t*)plasma_tile_addr(A, m, n))
/******************************************************************************/
// Parallel tile LU factorization of a band matrix (panel + update sweep),
// scheduled with OpenMP task dependences.  For each diagonal tile k:
//   1. a multi-threaded panel factorization task (plasma_core_zgetrf),
//   2. per-column update tasks: row swaps, triangular solve, GEMM updates,
//   3. a task that shifts the panel's local pivot indices to global ones.
// Errors are reported through sequence/request (plasma_request_fail).
void plasma_pzgbtrf(plasma_desc_t A, int *ipiv,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
// Read parameters from the context.
plasma_context_t *plasma = plasma_context_self();
int ib = plasma->ib;
int max_panel_threads = plasma->max_panel_threads;
for (int k = 0; k < imin(A.mt, A.nt); k++) {
// for band matrix, gm is a multiple of mb,
// and there is no a10 submatrix
int mvak = plasma_tile_mview(A, k);
int nvak = plasma_tile_nview(A, k);
int ldak = plasma_tile_mmain_band(A, k, k);
// panel
int *ipivk = NULL;
plasma_complex64_t *a00 = NULL;
// panel height: limited by the matrix end and by the lower bandwidth
int mak = imin(A.m-k*A.mb, mvak+A.kl);
int size_a00 = (A.gm-k*A.mb) * plasma_tile_nmain(A, k);
int size_i = imin(mvak, nvak);
int num_panel_threads = imin(max_panel_threads,
imin(imin(A.mt, A.nt)-k, A.klt));
ipivk = &ipiv[k*A.mb];
a00 = A(k, k);
#pragma omp task depend(inout:a00[0:size_a00]) \
depend(out:ipivk[0:size_i]) \
priority(1)
{
// scratch for the cross-thread pivot search inside the panel
volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int));
if (max_idx == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile plasma_complex64_t *max_val =
(plasma_complex64_t*)malloc(num_panel_threads*sizeof(
plasma_complex64_t));
if (max_val == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile int info = 0;
plasma_barrier_t barrier;
plasma_barrier_init(&barrier);
if (sequence->status == PlasmaSuccess) {
// spawn num_panel_threads cooperating tasks for one panel
for (int rank = 0; rank < num_panel_threads; rank++) {
#pragma omp task shared(barrier) priority(1)
{
// create a view for panel as a "general" submatrix
plasma_desc_t view = plasma_desc_view(
A, (A.kut-1)*A.mb, k*A.nb, mak, nvak);
view.type = PlasmaGeneral;
plasma_core_zgetrf(view, &ipiv[k*A.mb], ib,
rank, num_panel_threads,
max_idx, max_val, &info,
&barrier);
if (info != 0)
plasma_request_fail(sequence, request, k*A.mb+info);
}
}
}
#pragma omp taskwait
free((void*)max_idx);
free((void*)max_val);
}
// update
// TODO: fills are not tracked, see the one in fork
for (int n = k+1; n < imin(A.nt, k+A.kut); n++) {
plasma_complex64_t *a01 = NULL;
plasma_complex64_t *a11 = NULL;
int nvan = plasma_tile_nview(A, n);
int size_a01 = ldak*nvan;
int size_a11 = (A.gm-(k+1)*A.mb)*nvan;
a01 = A(k, n);
a11 = A(k+1, n);
// first column after the panel gets higher priority (critical path)
#pragma omp task depend(in:a00[0:size_a00]) \
depend(inout:ipivk[0:size_i]) \
depend(inout:a01[0:size_a01]) \
depend(inout:a11[0:size_a11]) \
priority(n == k+1)
{
if (sequence->status == PlasmaSuccess) {
// geswp
int k1 = k*A.mb+1;
int k2 = imin(k*A.mb+A.mb, A.m);
plasma_desc_t view =
plasma_desc_view(A,
(A.kut-1 + k-n)*A.mb, n*A.nb,
mak, nvan);
view.type = PlasmaGeneral;
plasma_core_zgeswp(
PlasmaRowwise, view, 1, k2-k1+1, &ipiv[k*A.mb], 1);
// trsm
plasma_core_ztrsm(PlasmaLeft, PlasmaLower,
PlasmaNoTrans, PlasmaUnit,
mvak, nvan,
1.0, A(k, k), ldak,
A(k, n), plasma_tile_mmain_band(A, k, n));
// gemm
// trailing-submatrix update restricted to the band
for (int m = imax(k+1, n-A.kut); m < imin(k+A.klt, A.mt); m++) {
int mvam = plasma_tile_mview(A, m);
#pragma omp task priority(n == k+1)
{
plasma_core_zgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvam, nvan, A.nb,
-1.0, A(m, k), plasma_tile_mmain_band(A, m, k),
A(k, n), plasma_tile_mmain_band(A, k, n),
1.0, A(m, n), plasma_tile_mmain_band(A, m, n));
}
}
#pragma omp taskwait
}
}
}
// convert the panel's pivots from local (1-based within the panel)
// to global row indices
#pragma omp task depend(in:ipivk[0:size_i])
if (sequence->status == PlasmaSuccess) {
if (k > 0) {
for (int i = 0; i < imin(mak, nvak); i++) {
ipiv[k*A.mb+i] += k*A.mb;
}
}
}
}
}
|
emd.h | // fix inttypes for GCC
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <cinttypes>
// fix for the fix - it conflicts with numpy
#undef __STDC_FORMAT_MACROS
#include <cstdint>
#include <cmath>
#include <algorithm>
#include "cache.h"
#include "graph/min_cost_flow.h"
/*! @mainpage libwmdrelax
*
* @section s0 Description
* This library allows to efficinetly solve the Earth Mover's Distance
* problem (http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/RUBNER/emd.htm).
* It also solves the relaxed approximation suitable for calculating the
* Word Mover's Distance (http://www.cs.cornell.edu/~kilian/papers/wmd_metric.pdf),
* hence the name.
*
* Project: https://github.com/src-d/wmd-relax
*
* README: @ref ignore_this_doxygen_anchor
*
* @section s1 C/C++ API
* - emd() solves the original Earth Mover's Distance problem.
* - emd_relaxed() solves the relaxed problem - one of the two sums is replaced
* with the maximum element.
* - EMDCache and EMDRelaxedCache are the caches to prevent from dynamic memory
* allocation.
*
* Although C/C++ API is complete and totally usable as-is, python.cc provides
* the Python 3 API.
*
* @section s2 Python 3 API
*
* - emd_relaxed()
* - emd_relaxed_cache_init() creates the cache object for emd_relaxed()
* - emd_relaxed_cache_fini() destroys the cache object for emd_relaxed()
* - emd()
* - emd_cache_init() creates the cache object for emd()
* - emd_cache_fini() destroys the cache object for emd()
*
* @section s3 Building
*
* Normally, the library is built with setup.py as a part of the python package.
* Besides, it can be built with cmake. In the latter case, ensure that you've
* cloned or-tools submodule:
* @code{.unparsed}
* git submodule update --init
* @endcode
*/
namespace {
const int64_t MASS_MULT = 1000 * 1000 * 1000; // weights quantization constant
const int64_t COST_MULT = 1000 * 1000; // costs quantization constant
/// The cache for emd().
/// Pre-allocates the scratch buffers (side flags, quantized demands, and the
/// quantized cost matrix) and the min-cost-flow solver so that repeated emd()
/// calls do no dynamic allocation.  Buffer sizes are fixed by size_
/// (set by the wmd::Cache base class; _allocate()/_reset() are its hooks).
class EMDCache : public wmd::Cache {
public:
// per-node flag: true if the node has negative net demand (a "sink")
bool* side() const noexcept {
return side_.get();
}
// quantized net demand per node, length size_
int64_t* demand() const noexcept {
return demand_.get();
}
// quantized cost matrix, size_ x size_ (row-major)
int64_t* cost() const noexcept {
return cost_.get();
}
size_t get_size() const noexcept {
return size_;
}
operations_research::SimpleMinCostFlow& min_cost_flow() const noexcept {
return min_cost_flow_;
}
protected:
void _allocate() override {
side_.reset(new bool[size_]);
demand_.reset(new int64_t[size_]);
cost_.reset(new int64_t[size_ * size_]);
// warmup min_cost_flow_
// (touch every arc/node slot once so the solver's internal arrays are
// grown here, not during the timed emd() calls; Reset() keeps capacity)
for (size_t i = 0; i < size_; i++) {
for (size_t j = 0; j < size_; j++) {
min_cost_flow_.AddArcWithCapacityAndUnitCost(i, j, 1, 1);
}
}
for (size_t i = 0; i < size_; i++) {
min_cost_flow_.SetNodeSupply(i, 1);
}
min_cost_flow_.Reset();
}
void _reset() noexcept override {
side_.reset();
demand_.reset();
cost_.reset();
min_cost_flow_.Reset();
}
private:
mutable std::unique_ptr<bool[]> side_;
mutable std::unique_ptr<int64_t[]> demand_;
mutable std::unique_ptr<int64_t[]> cost_;
mutable operations_research::SimpleMinCostFlow min_cost_flow_;
// NOTE(review): lock_ is not referenced in this view; presumably the
// base class Cache::enter() returns it -- verify against cache.h.
mutable std::mutex lock_;
};
/// Used by emd() to convert the problem to min cost flow.
/// Quantizes the floating-point weights `in` into int64 mass units and
/// accumulates them into `out` (added when `sign` is false, subtracted when
/// true).  Each quantized mass is the difference of two rounded prefix sums,
/// so per-element rounding errors do not accumulate across the array.
/// A residual mismatch of the total is either folded into out[0] (tiny
/// rounding error) or reported to stderr (weights not normalized).
/// @param in    weights of length `size`, expected to sum to ~1
/// @param sign  false: add masses to `out`; true: subtract them
/// @param out   int64 accumulator of length `size`
/// @param size  number of elements, must be > 0
template <typename T>
void convert_weights(const T*__restrict__ in, bool sign,
                     int64_t*__restrict__ out, size_t size) {
  assert(in && out);
  assert(size > 0);
  int64_t sum = 0;
  double old_s = 0, new_s = 0;
  double mult = (sign ? -1 : 1);
  // Fix: this loop previously carried "#pragma omp simd", but the prefix-sum
  // recurrence (new_s depends on the previous iteration) and the scalar
  // accumulation into `sum` are loop-carried dependences, so declaring the
  // loop SIMD-safe without reduction/ordered clauses was invalid.
  for (size_t i = 0; i < size; i++) {
    old_s = new_s;
    new_s = old_s + in[i];
    int64_t w = round(new_s * MASS_MULT) - round(old_s * MASS_MULT);
    sum += w;
    out[i] += w * mult;
  }
  if (sum != MASS_MULT) {
    if (fabs(sum - MASS_MULT + 0.) / MASS_MULT > 0.000001) {
#ifndef NDEBUG
      assert(sum == MASS_MULT && "Masses on one side not sufficiently normalized.");
#else
      fprintf(stderr,
              "wmd: weights are not normalized: %" PRId64 " != %" PRId64 "\n",
              sum, MASS_MULT);
#endif
    } else {
      // compensate for the rounding error
      out[0] += (sign ? 1 : -1) * (sum - MASS_MULT);
    }
  }
}
/// Used by emd() to convert the problem to min cost flow.
/// Pass 1 quantizes every pairwise cost onto the int64 COST_MULT grid.
/// Pass 2 antisymmetrizes the matrix: an entry from a `side` node to a
/// non-`side` node takes the negated value of its transposed entry.
template <typename T>
void convert_costs(const T*__restrict__ in, const bool*__restrict__ side,
                   int64_t*__restrict__ out, size_t size) {
  #pragma omp simd
  for (size_t row = 0; row < size; row++) {
    const size_t base = row * size;
    for (size_t col = 0; col < size; col++) {
      out[base + col] = round(in[base + col] * COST_MULT);
    }
  }
  #pragma omp simd
  for (size_t row = 0; row < size; row++) {
    for (size_t col = 0; col < size; col++) {
      if (side[row] && !side[col]) {
        out[row * size + col] = -out[col * size + row];
      }
    }
  }
}
} // namespace
/// Solves the exact EMD problem. Internally, it converts the conditions to
/// a min cost flow statement and calls operations_research::SimpleMinCostFlow.
/// @param w1 The first array with weights of length `size`.
/// @param w2 The second array with weights of length `size`.
/// @param dist The costs matrix of shape `size` x `size`.
/// @param size The dimensionality of the problem.
/// @param cache The cache to use. It should be initialized with at least `size`
/// elements.
/// @author Wojciech Jabłoński <wj359634@students.mimuw.edu.pl>
template <typename T>
T emd(const T*__restrict__ w1, const T*__restrict__ w2,
const T*__restrict__ dist, uint32_t size, const EMDCache& cache) {
assert(w1 && w2 && dist);
assert(size > 0);
// hold the cache's lock for the whole solve (scratch buffers are shared)
std::lock_guard<std::mutex> _(cache.enter(size));
bool* side = cache.side();
int64_t* demand = cache.demand();
int64_t* cost = cache.cost();
// net demand per node: +w1 masses minus w2 masses
memset(demand, 0, size * sizeof(demand[0]));
convert_weights(w1, false, demand, size);
convert_weights(w2, true, demand, size);
#pragma omp simd
for (size_t i = 0; i < size; i++) {
side[i] = (demand[i] < 0);
}
convert_costs(dist, side, cost, size);
auto& min_cost_flow = cache.min_cost_flow();
// arcs only from supply nodes (demand > 0) to sink nodes (demand < 0)
for (size_t i = 0; i < size; i++) {
for (size_t j = 0; j < size; j++) {
if (!side[i] && side[j]) {
min_cost_flow.AddArcWithCapacityAndUnitCost(
i, j, std::min(demand[i], -demand[j]), cost[i * size + j]);
}
}
}
for (size_t i = 0; i < size; i++) {
min_cost_flow.SetNodeSupply(i, demand[i]);
}
auto status = min_cost_flow.Solve();
// NOTE(review): OptimalCost() is read before the status check; its value is
// only meaningful when status == OPTIMAL -- confirm against or-tools docs.
double result = min_cost_flow.OptimalCost();
min_cost_flow.Reset();
#ifndef NDEBUG
assert(status == operations_research::SimpleMinCostFlow::OPTIMAL);
#else
if (status != operations_research::SimpleMinCostFlow::OPTIMAL) {
fprintf(stderr, "wmd: status is %d\n", status);
return -status;
}
#endif
// undo both quantization scales to get the cost back in input units
return T((result / MASS_MULT) / COST_MULT);
}
|
utils.c | #include "utils.h"
// This is the dream
/* Exchanges the values pointed to by a and b. */
void swap(double *a, double *b) {
  const double tmp = *b;
  *b = *a;
  *a = tmp;
}
/* Sorts vec[0..arr_size) in ascending order using up to num_threads
 * OpenMP threads; thin wrapper delegating to the recursive splitter
 * partition_threaded().  (Dead commented-out pthread-style task code
 * removed.) */
void merge_sort_threaded(double *vec, size_t arr_size, int num_threads) {
  partition_threaded(vec, num_threads, 0, arr_size);
}
/* Recursively splits vec[start, end) at the midpoint, sorts the halves
 * (in parallel while more than one thread is available, sequentially via
 * partition() otherwise), then merges the two sorted runs.
 * Fix: omp_set_num_threads(2) used to be called INSIDE the already-running
 * parallel region, where it cannot change that region's team size (and a
 * bare statement inside a sections construct is non-conforming); the team
 * size is now requested with the num_threads(2) clause on the directive. */
void partition_threaded(double *vec, int threads, int start, int end) {
  int middle = (start + end) / 2;
  if (end - start > 1) {
    if (threads > 1) {
      omp_set_nested(1); /* allow nested regions for the recursion */
      #pragma omp parallel sections num_threads(2)
      {
        #pragma omp section
        partition_threaded(vec, threads / 2, start, middle);
        #pragma omp section
        partition_threaded(vec, threads - (threads / 2), middle, end);
      }
    } else {
      partition(vec, start, middle);
      partition(vec, middle, end);
    }
  }
  merge(vec, start, middle, end);
}
/* Task-based variant of the recursive splitter (pthread-style signature).
 * Splits the task's [start, end) range in half, recursing in parallel via
 * OpenMP sections while mythreads > 1, sequentially via partition()
 * otherwise, then merges the two sorted halves.  Always returns NULL. */
void *partition_worker(void *task) {
partition_task_t *mytask = (partition_task_t *)task;
partition_task_t tasks[2];
int middle = (mytask->start + mytask->end) / 2;
if (mytask->end - mytask->start > 1) {
if (mytask->mythreads > 1) {
/* child task 0: left half, gets half of the threads */
tasks[0].vec = mytask->vec;
tasks[0].number = mytask->number * 2 + 1;
tasks[0].mythreads = mytask->mythreads / 2;
tasks[0].start = mytask->start;
tasks[0].end = middle;
/* child task 1: right half, gets the remaining threads */
tasks[1].vec = mytask->vec;
tasks[1].number = mytask->number * 2 + 2;
tasks[1].mythreads = (mytask->mythreads / 2) + (mytask->mythreads % 2);
tasks[1].start = middle;
tasks[1].end = mytask->end;
#pragma omp parallel sections
{
#pragma omp section
{ partition_worker(&tasks[0]); }
#pragma omp section
{ partition_worker(&tasks[1]); }
}
} else {
// fprintf(stderr, "OI3 %d %d\n", mytask->start, mytask->end);
partition(mytask->vec, mytask->start, middle);
partition(mytask->vec, middle, mytask->end);
}
}
merge(mytask->vec, mytask->start, middle, mytask->end);
return NULL;
}
void merge_sort(double *vec, size_t arr_size) { partition(vec, 0, arr_size); }
/* Recursively sorts vec[start, end): split at the midpoint, sort each
 * half, then merge the two sorted runs in place. */
void partition(double *vec, int start, int end) {
  const int mid = (start + end) / 2;
  if (end - start <= 1) {
    /* 0 or 1 element: nothing to split; merge below is a trivial copy */
    merge(vec, start, mid, end);
    return;
  }
  partition(vec, start, mid);
  partition(vec, mid, end);
  merge(vec, start, mid, end);
}
/* Merges the adjacent sorted runs vec[start, middle) and vec[middle, end)
 * into one sorted run in place, using heap-allocated copies of the halves.
 * On equal elements the right run's element is taken, as before. */
void merge(double *vec, int start, int middle, int end) {
  const int left_len = middle - start;
  const int right_len = end - middle;
  double *left = (double *)malloc(sizeof(double) * left_len);
  double *right = (double *)malloc(sizeof(double) * right_len);
  memcpy(left, &vec[start], sizeof(double) * left_len);
  memcpy(right, &vec[middle], sizeof(double) * right_len);
  int li = 0, ri = 0, out = start;
  while (li < left_len && ri < right_len) {
    vec[out++] = (left[li] < right[ri]) ? left[li++] : right[ri++];
  }
  while (li < left_len) {
    vec[out++] = left[li++];
  }
  while (ri < right_len) {
    vec[out++] = right[ri++];
  }
  free(left);
  free(right);
}
/* Sorts vec[0..arr_size) in ascending order with bubble sort.
 * Improvements over the naive version (output is identical): each pass
 * scans one element fewer (the tail is already in its final place), and
 * the sort exits early as soon as a full pass performs no swap. */
void bubble_sort(double *vec, size_t arr_size) {
  if (arr_size < 2) {
    return; /* empty or single-element array is already sorted */
  }
  for (size_t limit = arr_size - 1; limit > 0; limit--) {
    int swapped = 0;
    for (size_t i = 0; i < limit; i++) {
      if (vec[i] > vec[i + 1]) {
        double tmp = vec[i];
        vec[i] = vec[i + 1];
        vec[i + 1] = tmp;
        swapped = 1;
      }
    }
    if (!swapped) {
      break; /* already sorted; remaining passes would do nothing */
    }
  }
}
|
test-math-vector-sincos.h | /* Wrappers definitions for tests of ABI of vector sincos/sincosf having
vector declaration "#pragma omp declare simd notinbranch".
Copyright (C) 2016-2020 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
/* Fills the integer vector VEC so that lane i holds the pointer &(val)[i].
   The union reinterprets VEC_INT_TYPE as an array of LEN object pointers;
   an `int i` must exist in the enclosing scope.  */
#define INIT_VEC_PTRS_LOOP(vec, val, len) \
do \
{ \
union { VEC_INT_TYPE v; __typeof__ ((val)[0]) *a[(len)]; } u; \
for (i = 0; i < len; i++) \
u.a[i] = &(val)[i]; \
(vec) = u.v; \
} \
while (0)
/* Wrapper for vector sincos/sincosf compatible with x86_64 and x32 variants
of _ZGVbN2vvv_sincos, _ZGVdN4vvv_sincos, _ZGVeN8vvv_sincos;
x32 variants of _ZGVbN4vvv_sincosf, _ZGVcN4vvv_sincos, _ZGVdN8vvv_sincosf,
_ZGVeN16vvv_sincosf. */
/* ABI: one vector of lane pointers per output.  Broadcasts x to all lanes,
   points each lane of the two pointer vectors at a local array slot, calls
   the vector routine, checks all lanes agree (TEST_VEC_LOOP), and stores
   lane 0 through r/r1.  */
#define VECTOR_WRAPPER_fFF_2(scalar_func, vector_func) \
extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE); \
void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \
{ \
int i; \
FLOAT r_loc[VEC_LEN], r1_loc[VEC_LEN]; \
VEC_TYPE mx; \
VEC_INT_TYPE mr, mr1; \
INIT_VEC_LOOP (mx, x, VEC_LEN); \
INIT_VEC_PTRS_LOOP (mr, r_loc, VEC_LEN); \
INIT_VEC_PTRS_LOOP (mr1, r1_loc, VEC_LEN); \
vector_func (mx, mr, mr1); \
TEST_VEC_LOOP (r_loc, VEC_LEN); \
TEST_VEC_LOOP (r1_loc, VEC_LEN); \
*r = r_loc[0]; \
*r1 = r1_loc[0]; \
return; \
}
/* Wrapper for vector sincos/sincosf compatible with x86_64 variants of
_ZGVcN4vvv_sincos, _ZGVeN16vvv_sincosf, _ZGVbN4vvv_sincosf,
_ZGVdN8vvv_sincosf, _ZGVcN8vvv_sincosf. */
/* ABI: two pointer-vector registers per output, each covering half the
   lanes, so the local arrays hold VEC_LEN/2 elements and each register
   argument is passed twice (mr, mr, mr1, mr1).  */
#define VECTOR_WRAPPER_fFF_3(scalar_func, vector_func) \
extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE); \
void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \
{ \
int i; \
FLOAT r_loc[VEC_LEN/2], r1_loc[VEC_LEN/2]; \
VEC_TYPE mx; \
VEC_INT_TYPE mr, mr1; \
INIT_VEC_LOOP (mx, x, VEC_LEN); \
INIT_VEC_PTRS_LOOP (mr, r_loc, VEC_LEN/2); \
INIT_VEC_PTRS_LOOP (mr1, r1_loc, VEC_LEN/2); \
vector_func (mx, mr, mr, mr1, mr1); \
TEST_VEC_LOOP (r_loc, VEC_LEN/2); \
TEST_VEC_LOOP (r1_loc, VEC_LEN/2); \
*r = r_loc[0]; \
*r1 = r1_loc[0]; \
return; \
}
/* Wrapper for vector sincosf compatible with x86_64 variant of
_ZGVcN8vvv_sincosf. */
/* ABI: four pointer-vector registers per output, each covering a quarter
   of the lanes, so the local arrays hold VEC_LEN/4 elements and each
   register argument is passed four times.  */
#define VECTOR_WRAPPER_fFF_4(scalar_func, vector_func) \
extern void vector_func (VEC_TYPE, VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE, \
VEC_INT_TYPE, VEC_INT_TYPE); \
void scalar_func (FLOAT x, FLOAT * r, FLOAT * r1) \
{ \
int i; \
FLOAT r_loc[VEC_LEN/4], r1_loc[VEC_LEN/4]; \
VEC_TYPE mx; \
VEC_INT_TYPE mr, mr1; \
INIT_VEC_LOOP (mx, x, VEC_LEN); \
INIT_VEC_PTRS_LOOP (mr, r_loc, VEC_LEN/4); \
INIT_VEC_PTRS_LOOP (mr1, r1_loc, VEC_LEN/4); \
vector_func (mx, mr, mr, mr, mr, mr1, mr1, mr1, mr1); \
TEST_VEC_LOOP (r_loc, VEC_LEN/4); \
TEST_VEC_LOOP (r1_loc, VEC_LEN/4); \
*r = r_loc[0]; \
*r1 = r1_loc[0]; \
return; \
}
|
proj.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sched.h>
#include <omp.h>
#include "allocate.h"
#include "data.h"
#include "prepro.h"
#include "proj.h"
/* Allocates the sinogram data array: Nr x Nc x Nv entries
   (rows x channels x views), via get_spc() from allocate.h. */
void createSinogram(struct Sinogram *sinogram)
{
sinogram->sino = (ENTRY *)get_spc((sinogram->geom_info.Nr)*(sinogram->geom_info.Nc)*(sinogram->geom_info.Nv), sizeof(ENTRY));
}
/* Releases the sinogram data array allocated by createSinogram(). */
void freeSinogram(struct Sinogram *sinogram)
{
free(sinogram->sino);
}
/* Allocates the image volume: Nx x Ny x Nz voxels, via get_spc(). */
void createImage(struct Image *image)
{
image->img = (ENTRY *)get_spc((image->img_info.Nx)*(image->img_info.Ny)*(image->img_info.Nz), sizeof(ENTRY));
}
/* Releases the image volume allocated by createImage(). */
void freeImage(struct Image *image)
{
free(image->img);
}
void fillGeomInfo(struct GeomInfo *geom_info) /* fill in the intermediate variables */
{
/* first channel angle, centered on the detector with offset del_alphac */
geom_info->alphac0 = -(geom_info->Nc-1.0)*(geom_info->Del_alphac)/2.0 + geom_info->del_alphac;
/* source z advance per view (helical pitch term) -- assumes u is the
   pitch factor; verify against data.h */
geom_info->Del_zs = (geom_info->u)*(geom_info->Del_dr)*(geom_info->Del_beta)/(2.0*PI);
/* total detector extent in channels (angle) and rows (length) */
geom_info->detc = (geom_info->Nc)*(geom_info->Del_alphac);
geom_info->detr = (geom_info->Nr)*(geom_info->Del_dr);
geom_info->half_detr = (geom_info->Nr-1.0)*(geom_info->Del_dr)/2.0;
/* z half-extent of the cone at the near edge of the field of view */
geom_info->cone_zbuffer= geom_info->detr*(geom_info->r_si+geom_info->fov/2.0)/(2.0*geom_info->r_sd);
}
void fillImgInfo(struct ImgInfo *img_info) /* fill in the intermediate variables */
{
/* coordinates of the first voxel center, derived from the volume
   center (xc, yc, zc) and the voxel pitch */
img_info->x0 = img_info->xc - (img_info->Del_xy)*(img_info->Nx-1)/2.0;
img_info->y0 = img_info->yc - (img_info->Del_xy)*(img_info->Ny-1)/2.0;
img_info->z0 = img_info->zc - (img_info->Del_z)*(img_info->Nz-1)/2.0;
}
/* sjk: check information for consistency */
/* Verifies that all image pixels lie inside the source radius by testing
 * the four (x, y) corners of the pixel grid against r_si.
 * Fix: the far corners were computed by adding the raw pixel COUNTS
 * (Nx, Ny) to a physical coordinate; the physical extents Nx*Del_xy and
 * Ny*Del_xy must be added instead.  Error output now goes to stderr
 * with a newline. */
void checkInfo(struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
float x_lo,y_lo,x_hi,y_hi,dist;
/* near corner: half a pixel beyond the first pixel center */
x_lo = img_info->x0 - img_info->Del_xy/2.0;
y_lo = img_info->y0 - img_info->Del_xy/2.0;
/* far corner: near corner plus the physical grid extent */
x_hi = x_lo + img_info->Nx*img_info->Del_xy;
y_hi = y_lo + img_info->Ny*img_info->Del_xy;
/* furthest corner distance from isocenter (just check corners) */
dist = sqrt(x_lo*x_lo + y_lo*y_lo);
dist = max(dist,sqrt(x_hi*x_hi + y_lo*y_lo));
dist = max(dist,sqrt(x_lo*x_lo + y_hi*y_hi));
dist = max(dist,sqrt(x_hi*x_hi + y_hi*y_hi));
if(dist>geom_info->r_si) {
fprintf(stderr,"ERROR: You have pixels outside the source radius.\n");
exit(-1);
}
}
/* Allocates the per-view source trajectory arrays (beta angle and
   x/y/z source coordinates), one entry per view. */
void createSourceLocInfo(
struct SourceLocInfo *source_loc_info,
struct GeomInfo *geom_info)
{
source_loc_info->beta = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
source_loc_info->xs = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
source_loc_info->ys = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
source_loc_info->zs = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
}
/* Computes the source position for every view of a helical trajectory:
   angle beta advances by Del_beta per view, (xs, ys) lie on the circle of
   radius r_si, and zs advances linearly by Del_zs starting at 0. */
void compSourceLocInfo(
struct SourceLocInfo *source_loc_info,
struct GeomInfo *geom_info)
{
int iv; /* view index */
for (iv = 0; iv < geom_info->Nv; iv++)
{
source_loc_info->beta[iv] = geom_info->beta0 + iv*(geom_info->Del_beta);
source_loc_info->xs[iv] = (geom_info->r_si)*cos(source_loc_info->beta[iv]);
source_loc_info->ys[iv] = (geom_info->r_si)*sin(source_loc_info->beta[iv]);
source_loc_info->zs[iv] = iv*(geom_info->Del_zs); /* assume z start from 0 */
}
}
/* Releases the per-view source trajectory arrays. */
void freeSourceLocInfo(struct SourceLocInfo *source_loc_info)
{
  free(source_loc_info->zs);
  free(source_loc_info->ys);
  free(source_loc_info->xs);
  free(source_loc_info->beta);
}
/* Allocates the per-view arrays describing how one (x, y) pixel projects
   onto the detector channels; B is an array of per-view coefficient
   arrays filled later by compViewXYInfo(). */
void createViewXYInfo(
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info)
{
view_xy_info->ic_start = (CHANNEL *)get_spc(geom_info->Nv, sizeof(CHANNEL));
view_xy_info->ic_num = (PROCHANNEL *)get_spc(geom_info->Nv, sizeof(PROCHANNEL));
view_xy_info->Mag = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
view_xy_info->Wr = (ENTRY *)get_spc(geom_info->Nv, sizeof(ENTRY));
view_xy_info->B = (ENTRY **)get_spc(geom_info->Nv, sizeof(ENTRY *));
}
/* For a fixed pixel (x, y), computes for every view: the magnification,
   the projected row width Wr, the range [ic_start, ic_start+ic_num) of
   detector channels the pixel shadows, and the per-channel footprint
   coefficients B[iv][p].  Allocates B[iv]; free with freeViewXYInfoB(). */
void compViewXYInfo(
float x,
float y,
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info,
struct ImgInfo *img_info,
struct SourceLocInfo *source_loc_info)
{
int iv; /* view index */
float theta; /* slope of the ray */
float theta_td; /* adjusted theta, in [-PI/4, PI/4] */
float costh; /* cos(theta_td) */
float alphaj; /* voxel angle relative to ray through iso */
float alphaj_td; /* alphaj_td = (alphaj + PI) mod 2*PI - PI */
float alpha_min;
float alpha_max;
float r_sv; /* source to voxel */
float Wc; /* projection angle width */
float del_c; /* angle between ray through voxel center and ray through detector center */
float Bij;
CHANNEL ic_end;
PROCHANNEL p;
int ic; /* channel index */
for (iv = 0; iv < geom_info->Nv; iv++)
{
/* ray angle from the pixel to this view's source position */
theta = atan2((source_loc_info->ys[iv]-y), (source_loc_info->xs[iv]-x));
if (theta >= -PI/4.0)
{
theta_td = fmod((theta + PI/4.0), (PI/2.0)) - (PI/4.0);
}
else
{
theta_td = fmod((theta + PI/4.0), (PI/2.0)) + (PI/4.0);
}
costh = cos(theta_td);
alphaj = theta - source_loc_info->beta[iv];
alphaj_td = adjust(alphaj);
r_sv = sqrt((source_loc_info->xs[iv]-x)*(source_loc_info->xs[iv]-x) + (source_loc_info->ys[iv]-y)*(source_loc_info->ys[iv]-y));
view_xy_info->Mag[iv] = (geom_info->r_sd)/r_sv;
view_xy_info->Wr[iv] = (img_info->Del_z)*(view_xy_info->Mag[iv]);
/* angular width of the pixel footprint on the detector */
Wc = (img_info->Del_xy)*costh/r_sv;
alpha_min = alphaj_td - geom_info->alphac0 - (Wc - geom_info->Del_alphac)/2.0;
alpha_max = alphaj_td - geom_info->alphac0 + (Wc + geom_info->Del_alphac)/2.0;
if (alpha_max < 0 || alpha_min > geom_info->detc)
{
/* footprint misses the detector entirely for this view */
view_xy_info->ic_num[iv] = 0;
}
else
{
view_xy_info->ic_start[iv] = (CHANNEL)max((CHANNEL)floor(alpha_min/(geom_info->Del_alphac)), 0);
ic_end = (CHANNEL)min((CHANNEL)floor(alpha_max/(geom_info->Del_alphac)), (CHANNEL)(geom_info->Nc-1));
view_xy_info->ic_num[iv] = ((PROCHANNEL)(ic_end - view_xy_info->ic_start[iv] + 1));
}
view_xy_info->B[iv] = (ENTRY *)get_spc((int)(view_xy_info->ic_num[iv]), sizeof(ENTRY));
for (p = 0; p < view_xy_info->ic_num[iv]; p++)
{
ic = (int)(view_xy_info->ic_start[iv] + p);
del_c = adjust(alphaj - (ic*(geom_info->Del_alphac) + geom_info->alphac0));
/* trapezoid footprint: overlap of the pixel shadow with channel ic */
Bij = clip(0.0, ((Wc+(geom_info->Del_alphac))/2.0)-fabs(del_c), min(Wc, (geom_info->Del_alphac)));
Bij *= ((img_info->Del_xy)/((geom_info->Del_alphac)*costh));
view_xy_info->B[iv][p] = Bij;
}
}
}
/* Frees the per-view coefficient arrays B[iv] allocated by
 * compViewXYInfo(); the ViewXYInfo container arrays are left intact. */
void freeViewXYInfoB(
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info)
{
  int iv; /* view index */
  for (iv = 0; iv < geom_info->Nv; iv++)
    free(view_xy_info->B[iv]);
}
/* Releases the ViewXYInfo container arrays.  Note: the inner B[iv]
   arrays are NOT freed here -- call freeViewXYInfoB() first. */
void freeViewXYInfo(
struct ViewXYInfo *view_xy_info,
struct GeomInfo *geom_info)
{
free(view_xy_info->ic_start);
free(view_xy_info->ic_num);
free(view_xy_info->Mag);
free(view_xy_info->Wr);
free(view_xy_info->B);
}
/* Allocates the per-view detector-row range arrays used by
   compViewXYZInfo(). */
void createViewXYZInfo(
struct ViewXYZInfo *view_xyz_info,
struct GeomInfo *geom_info)
{
view_xyz_info->ir_start = (ROW *)get_spc(geom_info->Nv, sizeof(ROW));
view_xyz_info->ir_num = (PROROW *)get_spc(geom_info->Nv, sizeof(PROROW));
}
/* For a fixed voxel z (and the (x, y) data already in view_xy_info),
   computes the range of views [iv_start, iv_start+iv_num) whose cone can
   see the voxel, and for each such view the detector-row range
   [ir_start[iv], ir_start[iv]+ir_num[iv]) the voxel projects onto. */
void compViewXYZInfo(
float z,
struct ViewXYZInfo *view_xyz_info,
struct GeomInfo *geom_info,
struct ImgInfo *img_info,
struct SourceLocInfo *source_loc_info,
struct ViewXYInfo *view_xy_info)
{
int iv; /* view index */
float d;
float d_min;
float d_max;
ROW ir_end;
int iv_end; /* sjk */
if(0) /* sjk: this block is replaced with the lines following */
{
/* calculate d */
d = geom_info->Nr*geom_info->Del_dr*(geom_info->r_si+geom_info->fov/2.0)/(2.0*geom_info->r_sd);
if (z < source_loc_info->zs[0]-d || z > source_loc_info->zs[geom_info->Nv-1]+d)
{
view_xyz_info->iv_num = 0;
}
else
{
for (iv = 0; iv < geom_info->Nv; iv++)
{
if (z >= source_loc_info->zs[iv]-d && z <= source_loc_info->zs[iv]+d)
{
view_xyz_info->iv_start = iv;
break;
}
}
view_xyz_info->iv_num = 0;
for (iv = 0; iv < geom_info->Nv; iv++)
{
if (z >= source_loc_info->zs[iv]-d && z <= source_loc_info->zs[iv]+d)
{
view_xyz_info->iv_num++;
}
}
/*for (; z >= source_loc_info->zs[iv]-d && z <= source_loc_info->zs[iv]+d; iv++)
{
view_xyz_info->iv_num++;
}*/
}
}
/* sjk: this block replaces the above, finding the iv range in closed form */
view_xyz_info->iv_start=max(0,(int)floor((z-img_info->Del_z/2.0-geom_info->cone_zbuffer)/geom_info->Del_zs));
iv_end = min(geom_info->Nv-1,(int)ceil((z+img_info->Del_z/2.0+geom_info->cone_zbuffer)/geom_info->Del_zs));
if((iv_end<0) || (view_xyz_info->iv_start > (geom_info->Nv-1)))
view_xyz_info->iv_num=0;
else
view_xyz_info->iv_num= iv_end - view_xyz_info->iv_start + 1;
/* sjk: moved this block down so that we don't have to go through all views */
for (iv = view_xyz_info->iv_start; iv <= iv_end; iv++) /* sjk */
{
/* detector-row coordinates of the voxel's top/bottom edges, magnified
   from voxel z extent to the detector plane */
d_min = (geom_info->half_detr+geom_info->Del_dr/2.0-view_xy_info->Mag[iv]*(z-source_loc_info->zs[iv]+img_info->Del_z/2.0));
d_max = (geom_info->half_detr+geom_info->Del_dr/2.0-view_xy_info->Mag[iv]*(z-source_loc_info->zs[iv]-img_info->Del_z/2.0));
if (d_max < 0 || d_min > geom_info->detr)
{
/* projection misses the detector rows entirely for this view */
view_xyz_info->ir_num[iv] = 0;
}
else
{
view_xyz_info->ir_start[iv] = (ROW)max((ROW)floor(d_min/(geom_info->Del_dr)), 0);
ir_end = (ROW)min((ROW)floor(d_max/(geom_info->Del_dr)), (ROW)(geom_info->Nr-1));
view_xyz_info->ir_num[iv] = ((PROROW)(ir_end - view_xyz_info->ir_start[iv] + 1));
}
}
}
/* Release the per-view row-range arrays (ir_start / ir_num) owned by a
 * ViewXYZInfo; counterpart of createViewXYZInfo(). */
void freeViewXYZInfo(struct ViewXYZInfo *view_xyz_info)
{
    free(view_xyz_info->ir_num);
    free(view_xyz_info->ir_start);
}
/* Allocate an ACol able to hold `length` (index, value) pairs and record
 * its capacity in array_length (used by increaseAColLength). */
void createACol(struct ACol *A_col, int length)
{
    A_col->array_length = length; /* sjk */
    A_col->index = (int *)get_spc(length, sizeof(int));
    A_col->val = (ENTRY *)get_spc(length, sizeof(ENTRY));
}
/* sjk */
/* Grow the index/val arrays of an ACol by ~10%, preserving their contents.
 * Called by compAColxyzOnFly when a column is about to overflow its
 * current capacity. */
void increaseAColLength(struct ACol *A_col) /* increase array length by 10% */
{
    int *index, i, new_length;
    ENTRY *val;

    /* Grow by 10%, but always by at least one slot.  The old computation
     * (int)(1.1*len) truncated back to len whenever len < 10, so the array
     * never actually grew and the caller's capacity check could retrigger
     * forever. */
    new_length = 1.1 * A_col->array_length;
    if (new_length <= A_col->array_length)
        new_length = A_col->array_length + 1;

    index = (int *)get_spc(new_length, sizeof(int)); /* create new array */
    for (i = 0; i < A_col->array_length; i++)        /* copy old into new array */
        index[i] = A_col->index[i];
    free(A_col->index);                              /* free old array */
    A_col->index = index;                            /* point to new array */

    val = (ENTRY *)get_spc(new_length, sizeof(ENTRY));
    for (i = 0; i < A_col->array_length; i++)
        val[i] = A_col->val[i];
    free(A_col->val);
    A_col->val = val;

    A_col->array_length = new_length;
}
/* Free the storage allocated by createACol()/increaseAColLength(). */
void freeACol(struct ACol *A_col)
{
    free(A_col->val);
    free(A_col->index);
}
/*
 * compAColxyz: return in col_xyz the column of the system matrix A for the
 * voxel with indices (jjx, jjy, jjz).
 *
 * On the first call the ENTIRE A matrix is computed and cached in a static
 * 3-D array A_array (one ACol per voxel); later calls just copy the cached
 * column out.  col_xyz->index / col_xyz->val must be large enough to hold
 * the requested column (callers size them with COL_LEN).
 * NOTE(review): the static cache (A_array, A_col) is never freed and the
 * function is not thread-safe on the first call.
 */
void compAColxyz(
    int jjx,
    int jjy,
    int jjz,
    struct GeomInfo *geom_info,
    struct ImgInfo *img_info,
    struct ACol *col_xyz)
{
    static int first = 1;
    static struct ACol A_col;
    static struct ACol *A_col_pt;
    static struct ACol ***A_array; /* cached A matrix; each column is A[jx][jy][jz] */
    int jx, jy, jz;                /* voxel indicies */
    int iv, ic, ir, l, p, q;       /* detector indicies */
    int r;
    float x, y, z;                 /* voxel coordinate */
    float del_r;
    float Atmp, Cij;
    struct SourceLocInfo source_loc_info;
    struct ViewXYInfo view_xy_info;
    struct ViewXYZInfo view_xyz_info;

    if (first == 1) /* first call: precompute the whole A matrix and store it */
    {
        first = 0;
        A_array = (struct ACol ***)multialloc(sizeof(struct ACol), 3, img_info->Nx, img_info->Ny, img_info->Nz); /* no allocation for arrays */
        createACol(&A_col, COL_LEN); /* TODO, COL_LEN hard-coded */
        /* allocate precomputing structures */
        createSourceLocInfo(&source_loc_info, geom_info);
        compSourceLocInfo(&source_loc_info, geom_info); /* populate SourceLocInfo structure */
        createViewXYInfo(&view_xy_info, geom_info);
        createViewXYZInfo(&view_xyz_info, geom_info);
        fprintf(stdout, "\ninitializing A matrix ...\n");
        for (jx = 0; jx < img_info->Nx; jx++)
        {
            x = img_info->x0 + jx*img_info->Del_xy;
            for (jy = 0; jy < img_info->Ny; jy++)
            {
                y = img_info->y0 + jy*img_info->Del_xy;
                compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, &source_loc_info); /* populate ViewXYInfo structure */
                for (jz = 0; jz < img_info->Nz; jz++)
                {
                    z = img_info->z0 + jz*img_info->Del_z;
                    compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, &source_loc_info, &view_xy_info); /* populate ViewXYZInfo structure */
                    A_col.n_index = 0;
                    for (l = 0; l < view_xyz_info.iv_num; l++) /* view loop */
                    {
                        iv = view_xyz_info.iv_start + l;
                        for (p = 0; p < view_xy_info.ic_num[iv]; p++) /* channel loop */
                        {
                            ic = view_xy_info.ic_start[iv] + p;
                            for (q = 0; q < view_xyz_info.ir_num[iv]; q++) /* row loop */
                            {
                                ir = view_xyz_info.ir_start[iv] + q;
                                /* ATTENTION! CHANGE SIGN HERE ROW 0 IS CLOSEST !! */
                                del_r = view_xy_info.Mag[iv]*(z-source_loc_info.zs[iv]) + ir*geom_info->Del_dr - geom_info->half_detr;
                                Cij = clip(0.0, ((view_xy_info.Wr[iv]+geom_info->Del_dr)/2.0)-fabs(del_r), min(view_xy_info.Wr[iv], geom_info->Del_dr)); /* sjk: fixed typo */
                                Cij *= (sqrt((source_loc_info.xs[iv]-x)*(source_loc_info.xs[iv]-x)+(source_loc_info.ys[iv]-y)*(source_loc_info.ys[iv]-y)+(source_loc_info.zs[iv]-z)*(source_loc_info.zs[iv]-z)));
                                Cij /= (sqrt((source_loc_info.xs[iv]-x)*(source_loc_info.xs[iv]-x) +(source_loc_info.ys[iv]-y)*(source_loc_info.ys[iv]-y))*geom_info->Del_dr);
                                Atmp = view_xy_info.B[iv][p]*Cij;
                                if (Atmp > EPSILON) /* if nonzero entry */
                                {
                                    /* grow the scratch column if full (same guard as
                                     * compAColxyzOnFly); previously the write below
                                     * could overrun the COL_LEN-sized arrays */
                                    if (A_col.n_index >= A_col.array_length)
                                    {
                                        increaseAColLength(&A_col);
                                    }
                                    A_col.index[A_col.n_index] = iv*geom_info->Nc*geom_info->Nr + ic*geom_info->Nr + ir;
                                    A_col.val[A_col.n_index] = Atmp;
                                    A_col.n_index++;
                                }
                            }
                        }
                    }
                    /* one column of A for voxel (x,y,z) is done: store it in A_array */
                    A_array[jx][jy][jz].index = (int *)get_spc(A_col.n_index, sizeof(int));
                    A_array[jx][jy][jz].val = (ENTRY *)get_spc(A_col.n_index, sizeof(ENTRY));
                    A_array[jx][jy][jz].n_index = A_col.n_index;
                    for (r = 0; r < A_col.n_index; r++)
                    {
                        A_array[jx][jy][jz].index[r] = (int)A_col.index[r];
                        A_array[jx][jy][jz].val[r] = (ENTRY)A_col.val[r];
                    }
                }
                freeViewXYInfoB(&view_xy_info, geom_info);
            }
        }
        freeViewXYZInfo(&view_xyz_info);
        freeViewXYInfo(&view_xy_info, geom_info);
        freeSourceLocInfo(&source_loc_info);
        fprintf(stdout, "finish computing A matrix!\n");
    }
    /* copy the cached column for the requested voxel into the caller's ACol */
    A_col_pt = &A_array[jjx][jjy][jjz];
    col_xyz->n_index = A_col_pt->n_index;
    for (r = 0; r < A_col_pt->n_index; r++)
    {
        col_xyz->index[r] = A_col_pt->index[r];
        col_xyz->val[r] = A_col_pt->val[r];
    }
}
/*
 * compAColxyzOnFly: compute one column of the system matrix A for the voxel
 * at physical coordinates (x, y, z), without caching.  Fills A_col with the
 * (detector-index, weight) pairs whose weight exceeds EPSILON; A_col grows
 * automatically via increaseAColLength when full.
 */
void compAColxyzOnFly(
    float x,
    float y,
    float z,
    struct GeomInfo *geom_info,
    struct SourceLocInfo *source_loc_info,
    struct ViewXYInfo *view_xy_info,
    struct ViewXYZInfo *view_xyz_info,
    struct ACol *A_col)
{
    int iv, ic, ir, l, p, q;
    float del_r;
    float Cij;
    float Atmp;

    A_col->n_index = 0;
    for (l = 0; l < view_xyz_info->iv_num; l++) /* view loop */
    {
        iv = view_xyz_info->iv_start + l;
        for (p = 0; p < view_xy_info->ic_num[iv]; p++) /* channel loop */
        {
            ic = view_xy_info->ic_start[iv] + p;
            for (q = 0; q < view_xyz_info->ir_num[iv]; q++) /* row loop */
            {
                ir = view_xyz_info->ir_start[iv] + q;
                /* ATTENTION!! CHANGE SIGN HERE! ROW 0 IS CLOSEST */
                del_r = view_xy_info->Mag[iv]*(z-source_loc_info->zs[iv]) + ir*geom_info->Del_dr - geom_info->half_detr;
                Cij = clip(0.0, (((view_xy_info->Wr[iv]+geom_info->Del_dr)/2.0)-fabs(del_r)), min(view_xy_info->Wr[iv], geom_info->Del_dr));
                Cij *= (sqrt((source_loc_info->xs[iv]-x)*(source_loc_info->xs[iv]-x) + (source_loc_info->ys[iv]-y)*(source_loc_info->ys[iv]-y) + (source_loc_info->zs[iv]-z)*(source_loc_info->zs[iv]-z)));
                Cij /= (sqrt((source_loc_info->xs[iv]-x)*(source_loc_info->xs[iv]-x) + (source_loc_info->ys[iv]-y)*(source_loc_info->ys[iv]-y))*geom_info->Del_dr);
                Atmp = view_xy_info->B[iv][p]*Cij;
                if (Atmp > EPSILON) /* non-zero entry */
                {
                    A_col->index[A_col->n_index] = iv*geom_info->Nc*geom_info->Nr + ic*geom_info->Nr + ir;
                    A_col->val[A_col->n_index] = Atmp;
                    A_col->n_index++;
                    /* sjk: grow before the NEXT write would overflow */
                    if (A_col->n_index >= A_col->array_length)
                    {
                        /* fixed: old message printed (int)1.1*len, i.e. 1*len (the
                         * OLD length) because the cast bound tighter than '*' */
                        fprintf(stdout, "Increasing size of A column by 10%% (%d).\n", (int)(1.1*A_col->array_length));
                        increaseAColLength(A_col);
                    }
                }
            }
        }
    }
}
/*
 * forwardProject: compute the sinogram AX = A*X, optionally building AX_mask
 * (incremented for rays crossing voxels with X above hu2miu(3000,...)), with
 * voxels outside recon_mask skipped.  The per-voxel work is done by
 * paraForwardProject inside an OpenMP parallel region.
 */
void forwardProject(ENTRY *AX, ENTRY *X, unsigned short *AX_mask, char **recon_mask, struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
    int i, n_dets;
    struct SourceLocInfo source_loc_info;

    /* initialize projection (and optional mask) to zero */
    n_dets = (geom_info->Nr)*(geom_info->Nc)*(geom_info->Nv);
    for (i = 0; i < n_dets; i++)
    {
        AX[i] = 0.0;
    }
    if (AX_mask != NULL)
    {
        for (i = 0; i < n_dets; i++)
            AX_mask[i] = 0;
    }

    /* allocate precomputing structures shared (read-only) by all threads */
    createSourceLocInfo(&source_loc_info, geom_info);
    compSourceLocInfo(&source_loc_info, geom_info);

    fprintf(stdout, "\nforward projecting (parallelized version) ...\n");
    #pragma omp parallel
    {
        paraForwardProject(geom_info, img_info, &source_loc_info, X, AX, AX_mask, recon_mask);
    }
    fprintf(stdout, "\nfinish forward projection!\n");
    freeSourceLocInfo(&source_loc_info);
}
/* paraForwardProject: one OpenMP thread's share of AX = A*X.  Each thread
 * handles the jz slab [tid*Nz/nthreads, jzmax); updates to the shared AX
 * (and AX_mask) are serialized with an omp critical section.  Must be
 * called from inside an omp parallel region. */
void paraForwardProject(struct GeomInfo *geom_info,struct ImgInfo *img_info,struct SourceLocInfo *source_loc_info,ENTRY *X,ENTRY *AX,unsigned short *AX_mask,char **recon_mask)
{
int tid = omp_get_thread_num();
int indx,offset1,offset2; /* sjk */
int jx, jy, jz, jzmax, Nyz, r;
float x, y, z;
struct ACol col_xyz;
struct ViewXYInfo view_xy_info;
struct ViewXYZInfo view_xyz_info;
/* per-thread scratch structures (never shared across threads) */
createACol(&col_xyz, COL_LEN); /* TODO COL_LEN hard-coded */
createViewXYInfo(&view_xy_info, geom_info);
createViewXYZInfo(&view_xyz_info, geom_info);
fprintf(stdout,"tid is %d omp_get_num_threads() %d\n",tid,omp_get_num_threads());
/* this thread's exclusive jz range; last thread absorbs the remainder */
jzmax = (tid+1)*img_info->Nz/omp_get_num_threads();
if (tid == (omp_get_num_threads()-1))
{
jzmax = img_info->Nz;
}
Nyz = img_info->Ny*img_info->Nz;
for (jx = 0; jx < img_info->Nx; jx++)
{
x = img_info->x0 + jx*img_info->Del_xy;
offset1=jx*Nyz;
for (jy = 0; jy < img_info->Ny; jy++)
{
if(recon_mask[jx][jy])
/* sjk: everything outside mask has been set to 0, and never updated */
{
y = img_info->y0 + jy*img_info->Del_xy;
compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, source_loc_info);
offset2=offset1+jy*img_info->Nz;
for (jz = tid*img_info->Nz/omp_get_num_threads(); jz < jzmax; jz++)
{
indx=offset2+jz;
if(X[indx]>0)
/* zero voxels contribute nothing to AX, so skip the column computation */
{
z = img_info->z0 + jz*img_info->Del_z;
compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, source_loc_info, &view_xy_info);
compAColxyzOnFly(x, y, z, geom_info, source_loc_info, &view_xy_info, &view_xyz_info, &col_xyz);
/* AX and AX_mask are shared: serialize the scatter-add */
#pragma omp critical
{
for (r = 0; r < col_xyz.n_index; r++)
{
AX[col_xyz.index[r]] += col_xyz.val[r]*X[indx];
if(AX_mask != NULL)
{
/* count rays through very dense voxels (> 3000 HU) */
if(X[indx]>hu2miu(3000,MIU_AIR,MIU_WATER))
AX_mask[col_xyz.index[r]] += 1;
}
}
}
}
}
freeViewXYInfoB(&view_xy_info, geom_info);
}
}
}
freeViewXYZInfo(&view_xyz_info);
freeViewXYInfo(&view_xy_info, geom_info);
freeACol(&col_xyz);
}
/*
 * serialForwardProject: single-threaded AX = A*X.  For every voxel the
 * matrix column is computed on the fly (compAColxyzOnFly) and its entries
 * are scatter-added into the sinogram AX.
 */
void serialForwardProject(ENTRY *AX, ENTRY *X, struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
    int i, ix, iy, iz, k;
    int n_dets, n_yz, voxel;
    float vx, vy, vz;
    struct SourceLocInfo src_info;
    struct ViewXYInfo xy_info;
    struct ViewXYZInfo xyz_info;
    struct ACol a_col;

    /* zero the sinogram */
    n_dets = (geom_info->Nr)*(geom_info->Nc)*(geom_info->Nv);
    for (i = 0; i < n_dets; i++)
    {
        AX[i] = 0.0;
    }

    /* allocate precomputing structures */
    createACol(&a_col, COL_LEN); /* TODO COL_LEN hard-coded */
    createSourceLocInfo(&src_info, geom_info);
    compSourceLocInfo(&src_info, geom_info);
    createViewXYInfo(&xy_info, geom_info);
    createViewXYZInfo(&xyz_info, geom_info);

    n_yz = img_info->Ny*img_info->Nz;
    fprintf(stdout, "\nforward projecting (serial version) ...\n");
    /* sweep every voxel of the image */
    for (ix = 0; ix < img_info->Nx; ix++)
    {
        vx = img_info->x0 + ix*img_info->Del_xy;
        for (iy = 0; iy < img_info->Ny; iy++)
        {
            vy = img_info->y0 + iy*img_info->Del_xy;
            compViewXYInfo(vx, vy, &xy_info, geom_info, img_info, &src_info);
            for (iz = 0; iz < img_info->Nz; iz++)
            {
                vz = img_info->z0 + iz*img_info->Del_z;
                compViewXYZInfo(vz, &xyz_info, geom_info, img_info, &src_info, &xy_info);
                compAColxyzOnFly(vx, vy, vz, geom_info, &src_info, &xy_info, &xyz_info, &a_col);
                voxel = ix*n_yz + iy*img_info->Nz + iz;
                for (k = 0; k < a_col.n_index; k++)
                {
                    AX[a_col.index[k]] += a_col.val[k]*X[voxel];
                }
            }
            freeViewXYInfoB(&xy_info, geom_info);
        }
    }
    fprintf(stdout, "\nfinish forward projection!\n");
    freeViewXYZInfo(&xyz_info);
    freeViewXYInfo(&xy_info, geom_info);
    freeSourceLocInfo(&src_info);
    freeACol(&a_col);
}
/*
 * backProject: zero the image X, then back-project the sinogram AX into it
 * via paraBackProject.
 * NOTE(review): paraBackProject is invoked here OUTSIDE any omp parallel
 * region, so omp_get_num_threads() is 1 and the call runs single-threaded
 * despite the "parallelized version" message — confirm intent.
 */
void backProject(ENTRY *AX, ENTRY *X, struct GeomInfo *geom_info, struct ImgInfo *img_info)
{
    int i, len;
    struct SourceLocInfo source_loc_info;

    /* initialize image to zero */
    len = img_info->Nx * img_info->Ny * img_info->Nz;
    for (i = 0; i < len; i++)
        X[i] = 0.0;

    /* allocate precomputing structures */
    createSourceLocInfo(&source_loc_info, geom_info);
    compSourceLocInfo(&source_loc_info, geom_info);

    fprintf(stdout, "\nback projecting (parallelized version) ...\n");
    paraBackProject(geom_info, img_info, &source_loc_info, X, AX);
    freeSourceLocInfo(&source_loc_info);
}
/*
 * paraBackProject: one thread's share of the back projection.  For each
 * voxel in this thread's jz slab, the matrix column is recomputed on the fly
 * and X[voxel] is set to the average of AX over the column's non-zero
 * entries.  Returns 0 (void* return kept for the old pthread-style callers).
 */
void *paraBackProject(struct GeomInfo *geom_info,struct ImgInfo *img_info,struct SourceLocInfo *source_loc_info,ENTRY *X,ENTRY *AX)
{
    int tid = omp_get_thread_num();
    int indx, offset1, offset2; /* sjk */
    int jx, jy, jz, jzmax, Nyz, r;
    float x, y, z;
    struct ACol col_xyz;
    struct ViewXYInfo view_xy_info;
    struct ViewXYZInfo view_xyz_info;
    float sum;

    /* per-thread scratch structures */
    createACol(&col_xyz, COL_LEN); /* TODO COL_LEN hard-coded */
    createViewXYInfo(&view_xy_info, geom_info);
    createViewXYZInfo(&view_xyz_info, geom_info);

    /* this thread's jz range; last thread absorbs the remainder */
    jzmax = (tid+1)*img_info->Nz/omp_get_num_threads();
    if (tid == (omp_get_num_threads()-1))
    {
        jzmax = img_info->Nz;
    }
    Nyz = img_info->Ny*img_info->Nz;
    for (jx = 0; jx < img_info->Nx; jx++)
    {
        x = img_info->x0 + jx*img_info->Del_xy;
        offset1 = jx*Nyz; /* sjk */
        for (jy = 0; jy < img_info->Ny; jy++)
        {
            y = img_info->y0 + jy*img_info->Del_xy;
            compViewXYInfo(x, y, &view_xy_info, geom_info, img_info, source_loc_info);
            offset2 = offset1 + jy*img_info->Nz; /* sjk */
            for (jz = tid*img_info->Nz/omp_get_num_threads(); jz < jzmax; jz++)
            {
                indx = offset2 + jz; /* sjk */
                z = img_info->z0 + jz*img_info->Del_z;
                compViewXYZInfo(z, &view_xyz_info, geom_info, img_info, source_loc_info, &view_xy_info);
                compAColxyzOnFly(x, y, z, geom_info, source_loc_info, &view_xy_info, &view_xyz_info, &col_xyz);
                sum = 0;
                for (r = 0; r < col_xyz.n_index; r++)
                {
                    sum += AX[col_xyz.index[r]] * col_xyz.val[r];
                }
                /* divide by number of non-zero terms; guard the empty-column
                 * case, which previously divided by zero and wrote NaN/Inf */
                X[indx] = (col_xyz.n_index > 0) ? sum/col_xyz.n_index : 0.0;
            }
            freeViewXYInfoB(&view_xy_info, geom_info);
        }
    }
    freeViewXYZInfo(&view_xyz_info);
    freeViewXYInfo(&view_xy_info, geom_info);
    freeACol(&col_xyz);
    return 0;
}
|
GB_binop__pow_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int64)
// C=scalar+B GB (_bind1st__pow_int64)
// C=scalar+B' GB (_bind1st_tran__pow_int64)
// C=A+scalar GB (_bind2nd__pow_int64)
// C=A'+scalar GB (_bind2nd_tran__pow_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_pow_int64 (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_int64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT64 || GxB_NO_POW_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the numerical kernel is supplied
// by the included template (auto-generated file: do not hand-edit logic).
void GB (_Cdense_ewise3_noaccum__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C.  Returns GrB_NO_VALUE when the
// operator is disabled (GB_DISABLE), telling the caller to use the generic
// kernel instead.
GrB_Info GB (_Cdense_accumB__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed untyped via p_bwork) into dense C.
GrB_Info GB (_Cdense_accumb__pow_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the inner block already returned) — generated-code artifact
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (with optional mask M / complemented mask): kernel supplied
// by GB_add_template.c.  alpha/beta scalars are only read for eWiseUnion.
GrB_Info GB (_AaddB__pow_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
// typed copies of the input scalars, used by the template
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (sparse/hyper C, optional mask): kernel supplied by
// GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__pow_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#>=A.*B where A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 1 for pow (non-commutative), so flipxy selects the
// GB_FLIPPED variant of the template.
GrB_Info GB (_AemultB_02__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M>=A.*B where M is sparse/hyper and A, B are bitmap/full:
// kernel supplied by GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap (with optional mask): kernel supplied by
// GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__pow_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx [p] = pow (x, Bx [p]) for all bnz entries, skipping entries
// absent according to the bitmap Bb (GBB test).
GrB_Info GB (_bind1st__pow_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_int64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx [p] = pow (Ax [p], y) for all anz entries, skipping entries
// absent according to the bitmap Ab (GBB test).
GrB_Info GB (_bind2nd__pow_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_int64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int64 (x, aij) ; \
}
// C = pow (x, A'): transpose A and apply the operator with x bound first;
// element-wise work is done by GB_CAST_OP inside GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__pow_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (preprocessor-only; no effect at run time)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int64 (aij, y) ; \
}
// C = pow (A', y): transpose A and apply the operator with y bound second;
// element-wise work is done by GB_CAST_OP inside GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__pow_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ascii_utils.c | #include "ascii_utils.h"
#include <string.h>
#include <stdio.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
char *gray_levels_10 = "@%#*+=-:. ";
char *gray_levels_70 = "$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`'. ";
/*
 * Convert a gray+alpha image (2 bytes per pixel) to plain grayscale by
 * keeping each pixel's gray byte and dropping the alpha byte.
 * On success, *result points to a malloc'd width*height buffer the caller
 * must free.  Returns 1 on success, 0 on allocation failure.
 */
int gra_to_grayscale(unsigned char **result, unsigned char *image, int width, int height) {
    int image_length = width * height * 2;
    int gr_length = width * height;
    *result = (unsigned char*) malloc(gr_length);
    if (*result == NULL) { /* fixed: previously tested `result` (never NULL), not the allocation */
        fprintf(stderr, "%s\n", "Malloc of grayscale image failed.");
        return 0;
    }
    unsigned char *iter = *result;
    for (int i = 0; i < image_length; i += 2) {
        *iter++ = image[i]; /* image[i] is gray, image[i+1] is the skipped alpha */
    }
    return 1;
}
/*
 * Convert an RGB(A) image (n >= 3 bytes per pixel) to grayscale using the
 * luma weights 0.3 R + 0.59 G + 0.11 B; extra channels are ignored.
 * On success, *result points to a malloc'd width*height buffer the caller
 * must free.  Returns 1 on success, 0 for n <= 2 or allocation failure.
 */
int rgb_to_grayscale(unsigned char **result, unsigned char *image, int width, int height, int n) {
    if (n <= 2) return 0;
    int image_length = width * height * n;
    int gr_length = width * height;
    *result = (unsigned char*) malloc(gr_length);
    if (*result == NULL) { /* fixed: previously tested `result` (never NULL), not the allocation */
        fprintf(stderr, "%s\n", "Malloc of grayscale image failed.");
        return 0;
    }
    unsigned char *iter = *result;
    for (int i = 0; i < image_length; i += n) {
        unsigned char red = image[i];
        unsigned char green = image[i + 1];
        unsigned char blue = image[i + 2];
        float intensity = red * 0.3 + green * 0.59 + blue * 0.11;
        /* saturate BEFORE the narrowing cast; the old code cast to unsigned
         * char first, which made its clamp(…, 0, 255) a no-op */
        if (intensity < 0.0f) intensity = 0.0f;
        if (intensity > 255.0f) intensity = 255.0f;
        *iter++ = (unsigned char) intensity;
    }
    return 1;
}
/*
 * Dispatch to the proper grayscale conversion based on channel count n:
 * n==1 copies the image, n==2 drops alpha, n>=3 applies luma weights.
 * Returns 1 on success, 0 on failure (allocation error or bad n).
 */
int image_to_greyscale(unsigned char **result, unsigned char *image, int width, int height, int n) {
    int ok = 1;
    if (n == 1) {
        *result = malloc(width * height);
        if (*result == NULL) { /* fixed: this allocation was previously unchecked */
            fprintf(stderr, "%s\n", "Malloc of grayscale image failed.");
            return 0;
        }
        memcpy(*result, image, width * height);
    } else if (n == 2) {
        ok = gra_to_grayscale(result, image, width, height);
    } else if (n >= 3) {
        ok = rgb_to_grayscale(result, image, width, height, n);
    } else {
        /* n <= 0 lands here as well */
        fprintf(stderr, "%s\n", "Negative n is prohibited.");
        ok = 0;
    }
    return ok;
}
/*
 * Average pixel intensity of the half-open tile [x1,x2) x [y1,y2) in a
 * width-pixel-wide grayscale image.  Returns 0 for an empty tile (the old
 * code divided by zero in that case, which is UB for integers).
 * `height` is unused but kept for interface compatibility.
 */
static int compute_average(unsigned char *image, int width, int height, int x1, int y1, int x2, int y2) {
    int average = 0;
    int count = 0;
    for (int x = x1; x < x2; x++) {
        for (int y = y1; y < y2; y++) {
            average += image[y * width + x];
            count++;
        }
    }
    return count > 0 ? average / count : 0;
}
/*
 * Render the image in `filename` as ASCII art using the `levels` ramp
 * (dark to light).  `cols` sets the output width in characters; `scale`
 * is the character cell aspect ratio.  On success *result points to a
 * malloc'd NUL-terminated string (rows lines of cols chars, each ending
 * in '\n') the caller must free.  Returns 1 on success, 0 on failure.
 */
int image_to_ascii(char **result, char *filename, int cols, float scale, char *levels) {
    /* validate levels before using it: the old code called strlen(NULL) */
    if (levels == NULL) return 0;
    int levels_length = strlen(levels);
    if (levels_length == 0) return 0;
    int width, height, n;
    unsigned char *image = stbi_load(filename, &width, &height, &n, 0);
    if (image == NULL) {
        fprintf(stderr, "%s\n", "Image load failed - file may not exist or it isn't an image.");
        return 0;
    }
    float tile_width = ((float) width) / cols;
    float tile_height = tile_width / scale;
    int rows = (int) (height / tile_height);
    if (cols > width || rows > height) {
        fprintf(stderr, "%s\n", "Image too small for specified columns.");
        stbi_image_free(image); /* fixed: image was leaked on this path */
        return 0;
    }
    unsigned char *gr = NULL;
    int ok = image_to_greyscale(&gr, image, width, height, n);
    if (!ok || gr == NULL) {
        fprintf(stderr, "%s\n", "Converting image to grayscale failed.");
        stbi_image_free(image);
        return 0;
    }
    *result = (char*) malloc(rows * cols + rows + 1);
    if (*result == NULL) { /* fixed: previously tested `result`, not the allocation */
        fprintf(stderr, "%s\n", "Couldn't allocate string.");
        stbi_image_free(image); /* fixed: leaked on this path */
        free(gr);               /* fixed: leaked on this path */
        return 0;
    }
    char *iter = *result;
    for (int j = 0; j < rows; j++) {
        int y1 = (int) (j * tile_height);
        int y2 = j == rows - 1 ? height : (int) ((j + 1) * tile_height);
        for (int i = 0; i < cols; i++) {
            int x1 = (int) (i * tile_width);
            int x2 = i == cols - 1 ? width : (int) ((i + 1) * tile_width);
            float average = compute_average(gr, width, height, x1, y1, x2, y2);
            /* map average intensity [0,255] onto the ramp index */
            char gs = levels[clamp((int) (average / 255 * (levels_length - 1)), 0, levels_length - 1)];
            *iter++ = gs;
        }
        *iter++ = '\n';
    }
    *iter = '\0';
    stbi_image_free(image);
    free(gr);
    return 1;
}
|
gemm.h | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include <string>
#include "common/log.h"
#include "memory/t_malloc.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// 矩阵取值运算宏,假设矩阵按行存储
#define A(i, j) A[(i)*lda + (j)]
#define B(i, j) B[(i)*ldb + (j)]
#define C(i, j) C[(i)*ldc + (j)]
#if __aarch64__
#define MR_INT8 4
#define NR_INT8 4
#define MR 6
#define NR 16
#else
#define MR_INT8 4
#define NR_INT8 2
#define MR 6
#define NR 8
#endif
#define s_min(i, j) ((i) < (j) ? (i) : (j))
namespace paddle_mobile {
namespace operators {
namespace math {
// Gemm: blocked GEMM kernels for 32-bit float and 8-bit int matrices,
// with single-threaded and OpenMP variants. Panels of A and B are packed
// into contiguous buffers and multiplied by hand-written micro-kernels.
class Gemm {
public:
typedef void (Gemm::*FnPack)(int, int, int, const float *, int, float *,
const bool);
typedef void (Gemm::*FnAddDot)(int, const float *, const float *, float *,
int);
// Member-function pointers selecting the pack / micro-kernel routines.
FnPack procPackA;
FnPack procPackB;
FnAddDot procAddDot;
// Pack a panel of A (row-major, MR rows at a time) into `buffer`.
void PackMatrixA_6r(int m, int k, int m_tail, const float *A, int lda,
float *buffer, const bool parallel);
void PackMatrixA_8r(int m, int k, int m_tail, const float *A, int lda,
float *buffer, const bool parallel);
// Pack a panel of B (NR columns at a time) into `buffer`.
void PackMatrixB_8c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer, const bool parallel);
#if __aarch64__
void PackMatrixB_12c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer, const bool parallel);
void PackMatrixB_16c(int k, int n, int n_tail, const float *B, int ldb,
float *buffer, const bool parallel);
#endif
// Blocked matrix multiplication over packed panels
void InnerKernel(int mc, int nc, float alpha, const float *a, const float *b,
float beta, float *c, float *C, int ldc, bool relu);
void InnerKernelWithBias(int mc, int nc, float alpha, const float *a,
const float *b, float beta, float *c, float *C,
int ldc, bool relu, float *bias);
void InnerKernelWithBn(int mc, int nc, float alpha, const float *a,
const float *b, float beta, float *c, float *C,
int ldc, bool relu, float *new_scale, float *new_bias);
void InnerKernelWithBnAdd(int mc, int nc, float alpha, const float *a,
const float *b, float beta, float *c, float *C,
int ldc, bool relu, float *new_scale,
float *new_bias, float *bias);
void InnerKernelWithPRelu(int mc, int nc, const float *a, const float *b,
float *c, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// Compute one smaller tile of the C matrix
#if __aarch64__
void AddDot6x8(int k, const float *a, const float *b, float *c, int ldc);
void AddDot8x12(int k, const float *a, const float *b, float *c, int ldc);
void AddDot6x16(int k, const float *a, const float *b, float *c, int ldc);
#else
void AddDot4x4(int k, const float *a, const float *b, float *c, int ldc);
void AddDot4x8(int k, const float *a, const float *b, float *c, int ldc);
void AddDot6x8(int k, const float *a, const float *b, float *c, int ldc);
#endif
// Write back the result of the blocked matrix multiplication
// C = A * B
void WriteBasic(int mc, int nc, float *c, float *C, int ldc);
// C = alpha * A * B + beta * C
void WriteWithAlphaBeta(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + C
void WriteWithAdd(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + bias
void WriteWithAddV1(int mc, int nc, float *c, float *C, int ldc, float *bias);
// C = A * B + C, relu(C)
void WriteWithAddRelu(int mc, int nc, float *c, float *C, int ldc);
// C = A * B + C,prelu(C)
void WriteWithAddPRelu(int mc, int nc, float *c, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// C = A * B + bias ,relu(C)
void WriteWithAddReluV1(int mc, int nc, float *c, float *C, int ldc,
float *bias);
// C = A * B, batchnorm(C)
void WriteWithBn(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias);
// C = A * B, batchnorm(C), relu(C)
void WriteWithBnRelu(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias);
void WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias, float *bias1);
// Vector-matrix multiplication (M = 1)
#if __aarch64__
#else
void VectorKernel(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc,
bool relu);
void VectorKernelWithBn(int m, int n, int k, float alpha, const float *A,
int lda, const float *B, int ldb, float beta,
float *C, int ldc, bool relu, float *new_scale,
float *new_bias);
// Write back the vector-matrix multiplication result
// C = A * B
void VecWriteBasic(int n, float *c, float *C, int ldc);
// C = alpha * A * B + beta * C
void VecWriteWithAlphaBeta(int n, float *c, float *C, int ldc);
// C = A * B + C
void VecWriteWithAdd(int n, float *c, float *C, int ldc);
// C = A * B + C, relu(C)
void VecWriteWithAddRelu(int n, float *c, float *C, int ldc);
// C = A * B, batchnorm(C)
void VecWriteWithBn(int n, float *c, float *C, int ldc, float *new_scale,
float *new_bias);
// C = A * B, batchnorm(C), relu(C)
void VecWriteWithBnRelu(int n, float *c, float *C, int ldc, float *new_scale,
float *new_bias);
#endif
// 32-bit float matrix multiplication
void Sgemm(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc, bool relu,
float *bias);
// 32-bit float matrix multiplication, applying batchnorm to the result
void SgemmWithBn(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc,
bool relu, float *new_scale, float *new_bias, float *bias);
void SgemmWithPRelu(int m, int n, int k, const float *A, int lda,
const float *B, int ldb, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// 32-bit float matrix multiplication (OpenMP multi-threaded version)
void Sgemm_omp(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc,
bool relu, float *bias);
// 32-bit float matrix multiplication with batchnorm (OpenMP version)
void SgemmWithBn_omp(int m, int n, int k, float alpha, const float *A,
int lda, const float *B, int ldb, float beta, float *C,
int ldc, bool relu, float *new_scale, float *new_bias,
float *bias);
void SgemmWithPRelu_omp(int m, int n, int k, const float *A, int lda,
const float *B, int ldb, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1);
// 8 bits function cluster begins
// 8 bits int small block inner product, data packed k = 1
void AddDot4x8(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
void AddDot6x8(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
// 8 bits int small block inner product, data packed k = 16
void AddDot4x2(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
void AddDot4x4(int32_t k, const int8_t *a, const int8_t *b, int32_t *c,
int32_t ldc);
// 8 bits int inner product
template <typename Otype>
void InnerKernel(int32_t mc, int32_t nc, float alpha, const int8_t *a,
const int8_t *b, float beta, int32_t *c, Otype *C,
int32_t ldc, bool relu);
template <typename Otype>
void InnerKernelWithBias(int32_t mc, int32_t nc, float alpha, const int8_t *a,
const int8_t *b, float beta, int32_t *c, Otype *C,
int32_t ldc, bool relu, int32_t *bias,
bool addOnRow = false);
// 8 bits int pack function
void PackMatrixA_4r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixA_6r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixB_8c(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixA_4r_16(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixB_2c_16(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixB_4c_16(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixA_omp_4r(int32_t m, int32_t k, int32_t m_tail, const int8_t *A,
int32_t lda, int8_t *buffer);
void PackMatrixB_omp_8c(int32_t k, int32_t n, int32_t n_tail, const int8_t *B,
int32_t ldb, int8_t *buffer);
void PackMatrixA_omp_4r_16(int32_t m, int32_t k, int32_t m_tail,
const int8_t *A, int32_t lda, int8_t *buffer);
void PackMatrixB_omp_2c_16(int32_t k, int32_t n, int32_t n_tail,
const int8_t *B, int32_t ldb, int8_t *buffer);
void PackMatrixB_omp_4c_16(int32_t k, int32_t n, int32_t n_tail,
const int8_t *B, int32_t ldb, int8_t *buffer);
// 8 bits int matrix product
template <typename Itype, typename Btype, typename Otype>
void Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, const Itype *A,
int32_t lda, const Itype *B, int32_t ldb, float beta, Otype *C,
int32_t ldc, bool relu, Btype *bias, bool addOnRow = false);
template <typename Otype>
void Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
int32_t lda, const int8_t *B, int32_t ldb, float beta,
Otype *C, int32_t ldc, bool relu, int32_t *bias,
bool addOnRow = false);
template <typename Itype, typename Btype, typename Otype>
void Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const Itype *A,
int32_t lda, const Itype *B, int32_t ldb, float beta, Otype *C,
int32_t ldc, bool relu, Btype *bias, bool addOnRow = false);
template <typename Otype>
void Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
int32_t lda, const int8_t *B, int32_t ldb, float beta, Otype *C,
int32_t ldc, bool relu, int32_t *bias, bool addOnRow = false);
// 8 bits int write back
// C = A * B
void WriteBasic(int32_t mc, int32_t nc, int32_t *c, int32_t *C, int32_t ldc);
// C = A * B + bias, scale * relu(C)
void WriteWithAddReluScale(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
int32_t ldc, int32_t *bias, float scale);
// C = A * B + bias, scale * C, bias is added on column
void WriteWithAddScale(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
int32_t ldc, int32_t *bias, float scale);
// C = A * B + bias, scale * C, bias is added on row
void WriteWithAddScaleT(int32_t mc, int32_t nc, int32_t *c, int8_t *C,
int32_t ldc, int32_t *bias, float scale);
private:
// Cache-derived blocking sizes (set per call by the Sgemm entry points).
int MC = 0;
int KC = 0;
int NC = 0;
// 32-bit float packed buffers
float *packedA;
float *packedB;
float *packedC;
// 8 bits int
int8_t *packedA_int8;
int8_t *packedB_int8;
int32_t *packedC_int32;
int8_t *zero_int8;
};
// 8 bits int matrix product (m*k x k*n)
// 8-bit int matrix product C = alpha*A*B + beta*C (m*k x k*n), serial.
// Blocks A by MC rows and B by NC columns based on L1/L2 cache sizes,
// packs each panel, then dispatches to the int8 inner kernels.
template <typename Otype>
void Gemm::Sgemm(int32_t m, int32_t n, int32_t k, float alpha, const int8_t *A,
int32_t lda, const int8_t *B, int32_t ldb, float beta,
Otype *C, int32_t ldc, bool relu, int32_t *bias,
bool addOnRow) {
// L1 data cache is 32 KiB (per Cortex-A57, Cortex-A72, Cortex-A73)
// L2 cache is 0.5~4 MiB (Cortex-A72 cluster)
int32_t L1 = 32 * 1024;
int32_t L2 = 512 * 1024;
// Round k up to the next multiple of 16 (the int8 packing depth).
const int32_t k_complete = (k + 15) - ((k + 15) & 15);
KC = k_complete;
MC = L1 / (KC * sizeof(int8_t));
NC = L2 / (KC * sizeof(int8_t));
// make sure MC is multiple of MR_INT8, and NC is multiple of NR_INT8
if (MC == 0) {
MC = MR_INT8;
} else {
// Even out the block size across blocks, then round up to MR_INT8.
int32_t mblock_num = (m + MC - 1) / MC;
MC = (m + mblock_num - 1) / mblock_num;
MC = (MC + MR_INT8 - 1) / MR_INT8 * MR_INT8;
}
// DLOG << "mblock_num = " << mblock_num << ", MC = " << MC << "\n";
if (NC == 0) {
NC = NR_INT8;
} else {
// Same balancing for the column blocks, rounded up to NR_INT8.
int32_t nblock_num = (n + NC - 1) / NC;
NC = (n + nblock_num - 1) / nblock_num;
NC = (NC + NR_INT8 - 1) / NR_INT8 * NR_INT8;
}
// DLOG << "nblock_num = " << nblock_num << ", NC = " << NC << "\n";
// Workspace for one packed A panel, one packed B panel, and the int32
// accumulator tile; freed at the end of this call.
packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
packedC_int32 = static_cast<int32_t *>(
paddle_mobile::memory::Alloc(sizeof(int32_t) * MC * NC));
zero_int8 =
static_cast<int8_t *>(paddle_mobile::memory::Alloc(sizeof(int8_t) * k));
memset(static_cast<void *>(zero_int8), 0, sizeof(int8_t) * k);
int32_t mc, nc;
for (int32_t j = 0; j < n; j += NC) {
nc = s_min(n - j, NC);
#if __aarch64__
PackMatrixB_4c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, packedB_int8);
#else
PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, packedB_int8);
#endif
for (int32_t i = 0; i < m; i += MC) {
mc = s_min(m - i, MC);
PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, packedA_int8);
if (bias == nullptr) {
InnerKernel(mc, nc, alpha, packedA_int8, packedB_int8, beta,
packedC_int32, &C(i, j), ldc, relu);
} else {
// Bias is either per-row (offset by i) or per-column (offset by j).
if (addOnRow) {
InnerKernelWithBias(mc, nc, alpha, packedA_int8, packedB_int8, beta,
packedC_int32, &C(i, j), ldc, relu, bias + j,
addOnRow);
} else {
InnerKernelWithBias(mc, nc, alpha, packedA_int8, packedB_int8, beta,
packedC_int32, &C(i, j), ldc, relu, bias + i,
addOnRow);
}
}
}
}
paddle_mobile::memory::Free(packedA_int8);
paddle_mobile::memory::Free(packedB_int8);
paddle_mobile::memory::Free(packedC_int32);
paddle_mobile::memory::Free(zero_int8);
}
// 8 bits int matrix product (m*k x k*n), omp version
// 8-bit int matrix product (m*k x k*n), OpenMP version. Chooses the
// parallelization axis by shape: if m > n, threads split the rows of A
// (B is packed once, shared); otherwise threads split the columns of B
// (A is packed once, shared). Per-thread pack/accumulator buffers are
// carved out of one allocation sized by max_threads.
template <typename Otype>
void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
const int8_t *A, int32_t lda, const int8_t *B, int32_t ldb,
float beta, Otype *C, int32_t ldc, bool relu,
int32_t *bias, bool addOnRow) {
#ifdef _OPENMP
int32_t max_threads = omp_get_max_threads();
#else
int32_t max_threads = 1;
#endif
// Divide the L1 budget among the threads.
int32_t L1 = 64 / max_threads * 1024;
// Round k up to the next multiple of 16 (the int8 packing depth).
const int32_t k_complete = (k + 15) - ((k + 15) & 15);
KC = k_complete;
zero_int8 =
static_cast<int8_t *>(paddle_mobile::memory::Alloc(sizeof(int8_t) * k));
memset(static_cast<void *>(zero_int8), 0, sizeof(int8_t) * k);
if (m > n) {
// Tile (block) over A
MC = L1 / (KC * sizeof(int8_t));
if (MC == 0) {
MC = MR_INT8;
} else {
int32_t mblock_num = (m + MC - 1) / MC;
MC = (m + mblock_num - 1) / mblock_num;
MC = (MC + MR_INT8 - 1) / MR_INT8 * MR_INT8;
}
// Pad B up to a multiple of NR_INT8 and pack it once, shared by all
// threads.
NC = (n + NR_INT8 - 1) / NR_INT8 * NR_INT8;
packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
#if __aarch64__
PackMatrixB_omp_4c_16(k, n, n % NR_INT8, B, ldb, packedB_int8);
#else
PackMatrixB_omp_2c_16(k, n, n % NR_INT8, B, ldb, packedB_int8);
#endif
packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC * max_threads));
} else {
// Tile (block) over B
NC = L1 / (KC * sizeof(int8_t));
if (NC == 0) {
NC = NR_INT8;
} else {
int32_t nblock_num = (n + NC - 1) / NC;
NC = (n + nblock_num - 1) / nblock_num;
NC = (NC + NR_INT8 - 1) / NR_INT8 * NR_INT8;
}
// Pad A up to a multiple of MR_INT8 and pack it once, shared by all
// threads.
MC = (m + MR_INT8 - 1) / MR_INT8 * MR_INT8;
packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
// NOTE(review): both branches below are identical; the #if is kept
// only for symmetry with the B-packing code above.
#if __aarch64__
PackMatrixA_omp_4r_16(m, k, m % MR_INT8, A, lda, packedA_int8);
#else
PackMatrixA_omp_4r_16(m, k, m % MR_INT8, A, lda, packedA_int8);
#endif
packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC * max_threads));
}
// One int32 accumulator tile per thread.
packedC_int32 = static_cast<int32_t *>(
paddle_mobile::memory::Alloc(sizeof(int32_t) * MC * NC * max_threads));
if (m > n) {
#pragma omp parallel for
for (int32_t i = 0; i < m; i += MC) {
#ifdef _OPENMP
int32_t local_threads = omp_get_thread_num();
#else
int32_t local_threads = 0;
#endif
int32_t mc;
mc = s_min(m - i, MC);
// Per-thread slices of the packed-A and accumulator workspaces.
int8_t *local_A = packedA_int8 + MC * KC * local_threads;
int32_t *local_C = packedC_int32 + MC * NC * local_threads;
// NOTE(review): both branches are identical (see note above).
#if __aarch64__
PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, local_A);
#else
PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, local_A);
#endif
if (bias == nullptr) {
InnerKernel(mc, n, alpha, local_A, packedB_int8, beta, local_C,
&C(i, 0), ldc, relu);
} else {
// Row-wise bias is shared; column-wise bias is offset by row block.
if (addOnRow) {
InnerKernelWithBias(mc, n, alpha, local_A, packedB_int8, beta,
local_C, &C(i, 0), ldc, relu, bias, addOnRow);
} else {
InnerKernelWithBias(mc, n, alpha, local_A, packedB_int8, beta,
local_C, &C(i, 0), ldc, relu, bias + i, addOnRow);
}
}
}
} else {
#pragma omp parallel for
for (int32_t j = 0; j < n; j += NC) {
#ifdef _OPENMP
int32_t local_threads = omp_get_thread_num();
#else
int32_t local_threads = 0;
#endif
int32_t nc;
nc = s_min(n - j, NC);
// Per-thread slices of the packed-B and accumulator workspaces.
int8_t *local_B = packedB_int8 + KC * NC * local_threads;
int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__
PackMatrixB_4c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, local_B);
#else
PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, local_B);
#endif
if (bias == nullptr) {
InnerKernel(m, nc, alpha, packedA_int8, local_B, beta, local_C,
&C(0, j), ldc, relu);
} else {
if (addOnRow) {
InnerKernelWithBias(m, nc, alpha, packedA_int8, local_B, beta,
local_C, &C(0, j), ldc, relu, bias + j, addOnRow);
} else {
InnerKernelWithBias(m, nc, alpha, packedA_int8, local_B, beta,
local_C, &C(0, j), ldc, relu, bias, addOnRow);
}
}
}
}
paddle_mobile::memory::Free(packedA_int8);
paddle_mobile::memory::Free(packedB_int8);
paddle_mobile::memory::Free(packedC_int32);
paddle_mobile::memory::Free(zero_int8);
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
|
GB_binop__max_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__max_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint8)
// A*D function (colscale): GB (_AxD__max_uint8)
// D*A function (rowscale): GB (_DxB__max_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint8)
// C=scalar+B GB (_bind1st__max_uint8)
// C=scalar+B' GB (_bind1st_tran__max_uint8)
// C=A+scalar GB (_bind2nd__max_uint8)
// C=A'+scalar GB (_bind2nd_tran__max_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C += A+B with all three matrices dense; the included template does all
// the work using the GB_BINOP (GB_IMAX) macro defined above.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense (no accumulation into C).
GrB_Info GB (_Cdense_ewise3_noaccum__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// pre-computed B_ek_slicing task partition.
GrB_Info GB (_Cdense_accumB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__max_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable; the generated template returns above.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with op GB_IMAX.
GrB_Info GB (_AaddB__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Per-matrix slicing workspace, freed by GB_FREE_WORK below.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with op GB_IMAX.
GrB_Info GB (_AemultB_01__max_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (GB_BINOP_FLIP is 0 for MAX, which is commutative.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_03__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__max_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the MAX operator with the scalar bound as the
// first operand. Entries where the bitmap Bb is zero are skipped.
GrB_Info GB (_bind1st__max_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cz = (uint8_t *) Cx_output ;
const uint8_t *Bz = (uint8_t *) Bx_input ;
const uint8_t xval = (*((uint8_t *) x_input)) ;
int64_t idx ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (idx = 0 ; idx < anz ; idx++)
{
// only entries present in the bitmap are computed
if (GBB (Bb, idx))
{
Cz [idx] = GB_IMAX (xval, Bz [idx]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the MAX operator with the scalar bound as the
// second operand. Entries where the bitmap Ab is zero are skipped.
GrB_Info GB (_bind2nd__max_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cz = (uint8_t *) Cx_output ;
const uint8_t *Az = (uint8_t *) Ax_input ;
const uint8_t yval = (*((uint8_t *) y_input)) ;
int64_t idx ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (idx = 0 ; idx < anz ; idx++)
{
// only entries present in the bitmap are computed
if (GBB (Ab, idx))
{
Cz [idx] = GB_IMAX (Az [idx], yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply MAX with scalar x bound first.
GrB_Info GB (_bind1st_tran__max_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply MAX with scalar y bound second.
GrB_Info GB (_bind2nd_tran__max_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * Note: *y is normalized (mutated) in the process, matching the classic
 * glibc example. Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Carry excess microseconds out of the difference into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* y is now normalized, so the field-wise difference is the answer
     * and tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* The interval is negative exactly when x precedes the adjusted y. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Grid extents (user size + 8 ghost layers) and number of time steps.
   * Defaults prevent reading uninitialized values (undefined behavior in the
   * original) when command-line arguments are missing. */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* A[2][Nz][Ny][Nx]: double-buffered wavefield; roc2[Nz][Ny][Nx]: per-cell
   * coefficient.  (The original malloc'd roc2 twice; the first one-element
   * block was leaked.) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  {
    /* realloc through a temporary so the original block is not leaked when
     * the reallocation fails. */
    int *resized = (int*) realloc((void *)tile_size, sizeof(int)*5);
    if (resized == NULL) {
      free(tile_size);
      fprintf(stderr, "tile_size allocation failed\n");
      return 1;
    }
    tile_size = resized;
  }
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 512;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize arrays with reproducible pseudo-random data (fixed seed)
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* 8th-order-in-space, 2nd-order-in-time wave-equation stencil coefficients */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    /* Time-tiled stencil sweep generated by Pluto/CLooG (tile sizes
     * 4x4x24x512); the generated loop nest is kept verbatim. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=2*Nt-2;t1++) {
        lbp=ceild(t1+2,2);
        ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
            for (t4=max(max(ceild(t1-252,256),ceild(4*t2-Nz-499,512)),ceild(24*t3-Ny-499,512));t4<=min(min(min(floord(4*Nt+Nx-9,512),floord(2*t1+Nx-3,512)),floord(4*t2+Nx-9,512)),floord(24*t3+Nx+11,512));t4++) {
              for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(512*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
                for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                    lbv=max(512*t4,4*t5+4);
                    ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  free(A);          /* was leaked: the 2-pointer top-level block */
  free(tile_size);  /* was leaked */
  return 0;
}
|
transpose.h | #pragma once
#include "omp_util.h"
//
// This function is intended to be used in omp parallel reagions.
//
static inline void transpose(const float * const srcBuf, float* dstBuf,
const int width, const int height)
{
constexpr int blocksize = 16;
#pragma omp for collapse(2)
for (int i = 0; i < height; i += blocksize) {
for (int j = 0; j < width; j += blocksize) {
const int row_max = i + blocksize < height ? i + blocksize : height;
const int col_max = j + blocksize < width ? j + blocksize : width;
for (int row = i; row < row_max; ++row) {
OMP_SIMD
for (int col = j; col < col_max; ++col) {
dstBuf[col * height + row] = srcBuf[row * width + col];
}
}
}
}
}
|
r_numint.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <complex.h>
#include "cint.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <assert.h>
#define BOXSIZE 56
int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
int *ao_loc);
/* One grid block of the contraction vm = ao . dm (see VXCzdot_ao_dm):
 * vm[nocc,bgrids] (column stride ngrids) = ao[nao,ngrids] block . dm[nao,nocc],
 * skipping BOXSIZE-wide AO blocks flagged empty by VXCao_empty_blocks. */
static void dot_ao_dm(double complex *vm, double complex *ao, double complex *dm,
int nao, int nocc, int ngrids, int bgrids,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
/* number of BOXSIZE-wide AO blocks covering the nao basis functions */
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
char empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const double complex Z1 = 1;
/* beta=0 makes the first contributing gemm overwrite vm; switched to 1
 * afterwards so later blocks accumulate. */
double complex beta = 0;
if (has0) {
int box_id, bas_id, blen, i, j;
size_t b0;
for (box_id = 0; box_id < nbox; box_id++) {
if (!empty[box_id]) {
b0 = box_id * BOXSIZE;
blen = MIN(nao-b0, BOXSIZE);  /* last block may be short */
zgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
&Z1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
&beta, vm, &ngrids);
beta = 1.0;
}
}
/* No gemm ran (every block empty): vm was never written, so zero the
 * bgrids rows of each of the nocc output columns explicitly. */
if (beta == 0) { // all empty
for (i = 0; i < nocc; i++) {
for (j = 0; j < bgrids; j++) {
vm[i*ngrids+j] = 0;
}
}
}
} else {
/* No empty blocks: a single full-size gemm. */
zgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
&Z1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
}
}
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc]
 *
 * The grid is partitioned into BLKSIZE-point blocks; threads process whole
 * blocks independently (each writes a disjoint slice of vm's columns, so no
 * reduction is needed). */
void VXCzdot_ao_dm(double complex *vm, double complex *ao, double complex *dm,
                   int nao, int nocc, int ngrids, int nbas,
                   unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel default(none) \
        shared(vm, ao, dm, nao, nocc, ngrids, nbas, \
               non0table, shls_slice, ao_loc)
{
        int iblk;
#pragma omp for nowait schedule(static)
        for (iblk = 0; iblk < nblk; iblk++) {
                const int grid0 = iblk * BLKSIZE;
                dot_ao_dm(vm+grid0, ao+grid0, dm,
                          nao, nocc, ngrids, MIN(ngrids-grid0, BLKSIZE),
                          non0table+iblk*nbas, shls_slice, ao_loc);
        }
}
}
/* conj(vv[n,m]) = ao1[n,ngrids] * conj(ao2[m,ngrids]) */
/* One grid block of the overlap-like contraction (see VXCzdot_ao_ao):
 * accumulates conj(vv) += ao1 . conj(ao2) over bgrids grid points, block by
 * BOXSIZE block, skipping AO blocks flagged empty.  With hermi != 0 only the
 * block-lower triangle (jb <= ib) is computed; the caller symmetrizes. */
static void dot_ao_ao(double complex *vv, double complex *ao1, double complex *ao2,
int nao, int ngrids, int bgrids, int hermi,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
char empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_C = 'C';
const char TRANS_N = 'N';
const double complex Z1 = 1;
if (has0) {
int ib, jb, leni, lenj;
int j1 = nbox;  /* upper bound for jb; shrinks per-row when hermi */
size_t b0i, b0j;
for (ib = 0; ib < nbox; ib++) {
if (!empty[ib]) {
b0i = ib * BOXSIZE;
leni = MIN(nao-b0i, BOXSIZE);  /* last block may be short */
if (hermi) {
j1 = ib + 1;
}
for (jb = 0; jb < j1; jb++) {
if (!empty[jb]) {
b0j = jb * BOXSIZE;
lenj = MIN(nao-b0j, BOXSIZE);
/* beta = 1: accumulate into vv across grid blocks */
zgemm_(&TRANS_C, &TRANS_N, &lenj, &leni, &bgrids, &Z1,
ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids,
&Z1, vv+b0i*nao+b0j, &nao);
} }
} }
} else {
/* No empty blocks: a single full-size gemm. */
zgemm_(&TRANS_C, &TRANS_N, &nao, &nao, &bgrids,
&Z1, ao2, &ngrids, ao1, &ngrids, &Z1, vv, &nao);
}
}
/* vv[nao,nao] = conj(ao1[i,nao]) * ao2[i,nao] */
/* vv[nao,nao] = conj(ao1[i,nao]) * ao2[i,nao], summed over all ngrids grid
 * points.  Each thread accumulates its BLKSIZE grid blocks into a private
 * nao*nao buffer (via dot_ao_ao), then merges the conjugate of that buffer
 * into vv inside a critical section.  hermi != 0 requests symmetrization of
 * the triangular result by NPzhermi_triu. */
void VXCzdot_ao_ao(double complex *vv, double complex *ao1, double complex *ao2,
int nao, int ngrids, int nbas, int hermi,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
memset(vv, 0, sizeof(double complex) * nao * nao);
#pragma omp parallel default(none) \
shared(vv, ao1, ao2, nao, ngrids, nbas, hermi, \
non0table, shls_slice, ao_loc)
{
int ip, ib;
/* NOTE(review): calloc result is unchecked; a failed allocation would
 * dereference NULL inside dot_ao_ao -- confirm whether an error path is
 * wanted here. */
double complex *v_priv = calloc(nao*nao, sizeof(double complex));
#pragma omp for nowait schedule(static)
for (ib = 0; ib < nblk; ib++) {
ip = ib * BLKSIZE;
dot_ao_ao(v_priv, ao1+ip, ao2+ip,
nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
non0table+ib*nbas, shls_slice, ao_loc);
}
/* Serialize the merge; the conj() here converts the conj-transposed
 * partial result of dot_ao_ao into the documented vv layout. */
#pragma omp critical
{
for (ip = 0; ip < nao*nao; ip++) {
vv[ip] += conj(v_priv[ip]);
}
}
free(v_priv);
}
if (hermi != 0) {
NPzhermi_triu(nao, vv, hermi);
}
}
|
sum_double.c | //sum.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 20
#define N 10240000
// read timer in second
// Wall-clock time in seconds with millisecond resolution (via ftime).
double read_timer() {
    struct timeb now;
    ftime(&now);
    return now.time + now.millitm * 1.0e-3;
}
//Create a matrix and a vector and fill with random numbers
// Fill X[0..N-1] with pseudo-random doubles in [0, 10].
void init(double *X) {
    int idx = 0;
    while (idx < N) {
        X[idx] = (double)rand() / (double)(RAND_MAX/10.0);
        ++idx;
    }
}
//Our sum function- what it does is pretty straight-forward.
// SIMD-vectorized sum of the N elements of X.
double sum(double *X) {
    double acc = 0;
#pragma omp simd reduction(+:acc)
    for (int idx = 0; idx < N; idx++)
        acc += X[idx];
    return acc;
}
// Debug functions
// Scalar reference sum of the N elements of X (same left-to-right order
// as a plain accumulation loop).
double sum_serial(double *X) {
    double total = 0;
    const double *p = X;
    const double *end = X + N;
    while (p < end)
        total += *p++;
    return total;
}
// Print the first 8 entries of the vector as "[a b c ... ]".
void print_vector(double *vector) {
    putchar('[');
    for (int k = 0; k < 8; ++k)
        printf("%.2f ", vector[k]);
    puts("]");
}
int main(int argc, char **argv) {
    //Set everything up
    double *X = malloc(sizeof(double)*N);
    if (X == NULL) {  /* was unchecked: init() would have dereferenced NULL */
        fprintf(stderr, "allocation of %d doubles failed\n", N);
        return 1;
    }
    double result, result_serial;
    srand(time(NULL));
    init(X);
    // warm-up passes so timed runs exclude first-touch/page-fault cost
    result = sum(X);
    result_serial = sum_serial(X);
    double t = 0;
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result = sum(X);
    t += (read_timer() - start);
    double t_serial = 0;
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        result_serial = sum_serial(X);
    t_serial += (read_timer() - start_serial);
    print_vector(X);
    puts("=\n");
    printf("SIMD: %f\n", result);
    puts("---------------------------------");
    printf("Serial: %f\n", result_serial);
    /* A sum performs N additions per run, so total FLOPs = N * N_RUNS.
     * The old formula (2.0*N)*N*N_RUNS was copied from a matrix-multiply
     * benchmark and overstated throughput by a factor of ~2N. */
    double gflops = (1.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (1.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("Sum (SIMD):\t\t%4f\t%4f\n", t/N_RUNS, gflops);
    printf("Sum (Serial):\t\t%4f\t%4f\n", t_serial/N_RUNS, gflops_serial);
    printf("Correctness check: %f\n", result_serial - result);
    free(X);
    return 0;
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const double x_shear,const double y_shear,
% const double width,const double height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Crop *image to the bounding box of a width x height region after the given
 * x/y shears (plus a second x-shear when `rotate` is set, matching the
 * 3-shear rotation decomposition).  On success *image is replaced by the
 * cropped image; returns MagickFalse only when CropImage fails. */
static MagickBooleanType CropToFitImage(Image **image,
const double x_shear,const double y_shear,
const double width,const double height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
/* Four corners of the region, centered on the origin. */
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
for (i=0; i < 4; i++)
{
/* Apply x-shear, then y-shear, then (for rotation) a second x-shear,
 * and translate back to image coordinates. */
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
/* Axis-aligned bounding box of the sheared corners. */
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
/* Temporarily clear the page geometry so CropImage does not offset the
 * crop; the original page is restored on the cropped image. */
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The result will be auto-cropped if the artifact "deskew:auto-crop" is
% defined, while the amount the image is to be deskewed, in degrees is also
% saved as the artifact "deskew:angle".
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Accumulate shear sums of the packed bit-count matrix and write, for each
 * column x, the sum of squared differences between vertically adjacent
 * entries into projection[GetMatrixColumns(p)+sign*x-1].  The two matrices
 * are used as ping-pong buffers; `sign` (+1/-1) selects which half of the
 * projection array is filled (the two shear directions used by
 * RadonTransform). */
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
MatrixInfo
*swap;
register MatrixInfo
*p,
*q;
register ssize_t
x;
size_t
step;
p=source_matrixs;
q=destination_matrixs;
/* Butterfly passes: at each doubling of `step`, pairs of columns are
 * combined with vertical offsets i and i+1 (a shear-and-add recursion). */
for (step=1; step < GetMatrixColumns(p); step*=2)
{
for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
{
register ssize_t
i;
ssize_t
y;
unsigned short
element,
neighbor;
for (i=0; i < (ssize_t) step; i++)
{
/* Rows where both the y+i and y+i+1 partners exist. */
for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
continue;
}
/* Rows where only the y+i partner exists. */
for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
continue;
neighbor+=element;
if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
/* Rows past all partners: copy the element through unchanged. */
for ( ; y < (ssize_t) GetMatrixRows(p); y++)
{
if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
continue;
if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
continue;
}
}
}
/* Ping-pong: the destination of this pass is the source of the next. */
swap=p;
p=q;
q=swap;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
{
register ssize_t
y;
size_t
sum;
/* Column "energy": sum of squared vertical differences. */
sum=0;
for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
{
ssize_t
delta;
unsigned short
element,
neighbor;
if (GetMatrixElement(p,x,y,&element) == MagickFalse)
continue;
if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
continue;
delta=(ssize_t) element-(ssize_t) neighbor;
sum+=delta*delta;
}
projection[GetMatrixColumns(p)+sign*x-1]=sum;
}
}
/* Build the projection profile used to estimate skew: pack each image row
 * into byte-popcount cells (a pixel counts when any RGB channel falls below
 * `threshold`), run RadonProjection once with the bytes packed right-to-left
 * (sign -1) and once left-to-right (sign +1), filling the two halves of
 * projection[2*width-1].  Returns MagickFalse only when the work matrices
 * cannot be allocated/initialized.
 * NOTE(review): per-row cache failures set `status` but the function still
 * returns MagickTrue at the end -- confirm whether that is intentional. */
static MagickBooleanType RadonTransform(const Image *image,
const double threshold,size_t *projection,ExceptionInfo *exception)
{
CacheView
*image_view;
MatrixInfo
*destination_matrixs,
*source_matrixs;
MagickBooleanType
status;
size_t
count,
width;
ssize_t
j,
y;
unsigned char
c;
unsigned short
bits[256];
/* width = smallest power of two >= ceil(columns/8) (one cell per byte). */
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
exception);
destination_matrixs=AcquireMatrixInfo(width,image->rows,
sizeof(unsigned short),exception);
if ((source_matrixs == (MatrixInfo *) NULL) ||
(destination_matrixs == (MatrixInfo *) NULL))
{
if (destination_matrixs != (MatrixInfo *) NULL)
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
if (source_matrixs != (MatrixInfo *) NULL)
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
if (NullMatrix(source_matrixs) == MagickFalse)
{
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickFalse);
}
/* bits[b] = population count of byte b (lookup table). */
for (j=0; j < 256; j++)
{
c=(unsigned char) j;
for (count=0; c != 0; c>>=1)
count+=c & 0x01;
bits[j]=(unsigned short) count;
}
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
/* First pass: bytes stored right-to-left (i counts down). */
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=(ssize_t) (image->columns+7)/8;
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Shift in a 1 for "dark" pixels (any channel below threshold). */
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
/* Flush a final partial byte, left-aligned. */
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,--i,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
(void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
/* Second pass: same packing, bytes stored left-to-right (i counts up). */
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
i,
x;
size_t
bit,
byte;
unsigned short
value;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
bit=0;
byte=0;
i=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
((MagickRealType) GetPixelGreen(image,p) < threshold) ||
((MagickRealType) GetPixelBlue(image,p) < threshold))
byte|=0x01;
bit++;
if (bit == 8)
{
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
byte<<=(8-bit);
value=bits[byte];
(void) SetMatrixElement(source_matrixs,i++,y,&value);
}
}
RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
image_view=DestroyCacheView(image_view);
destination_matrixs=DestroyMatrixInfo(destination_matrixs);
source_matrixs=DestroyMatrixInfo(source_matrixs);
return(MagickTrue);
}
/* Estimate the image background and store it in image->background_color.
 * Averages the pixels that lie within `offset` of BOTH a horizontal and a
 * vertical edge (i.e. the four offset x offset corner regions: the row skip
 * and the column skip together exclude everything else).  No-op when
 * offset <= 0. */
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
ExceptionInfo *exception)
{
CacheView
*image_view;
PixelInfo
background;
double
count;
ssize_t
y;
/*
Compute average background color.
*/
if (offset <= 0)
return;
GetPixelInfo(image,&background);
count=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/* Skip interior rows entirely. */
if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Skip interior columns; only the corner blocks are sampled. */
if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
continue;
background.red+=QuantumScale*GetPixelRed(image,p);
background.green+=QuantumScale*GetPixelGreen(image,p);
background.blue+=QuantumScale*GetPixelBlue(image,p);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
background.alpha+=QuantumScale*GetPixelAlpha(image,p);
count++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/* Mean of the sampled pixels, rescaled to the quantum range. */
image->background_color.red=(double) ClampToQuantum(QuantumRange*
background.red/count);
image->background_color.green=(double) ClampToQuantum(QuantumRange*
background.green/count);
image->background_color.blue=(double) ClampToQuantum(QuantumRange*
background.blue/count);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
background.alpha/count);
}
/* Estimate the skew angle from the Radon projection profile, rotate the
 * image by the negated angle (affine transform), and optionally auto-crop
 * when the "deskew:auto-crop" artifact is set.  The detected angle is saved
 * on the result as the "deskew:angle" artifact.  Returns NULL on failure. */
MagickExport Image *DeskewImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
AffineMatrix
affine_matrix;
const char
*artifact;
double
degrees;
Image
*clone_image,
*crop_image,
*deskew_image,
*median_image;
MagickBooleanType
status;
RectangleInfo
geometry;
register ssize_t
i;
size_t
max_projection,
*projection,
width;
ssize_t
skew;
/*
Compute deskew angle.
*/
/* width = smallest power of two >= ceil(columns/8), matching the packed
 * layout produced by RadonTransform. */
for (width=1; width < ((image->columns+7)/8); width<<=1) ;
projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
sizeof(*projection));
if (projection == (size_t *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
status=RadonTransform(image,threshold,projection,exception);
if (status == MagickFalse)
{
projection=(size_t *) RelinquishMagickMemory(projection);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/* The projection index with maximum energy determines the skew offset. */
max_projection=0;
skew=0;
for (i=0; i < (ssize_t) (2*width-1); i++)
{
if (projection[i] > max_projection)
{
skew=i-(ssize_t) width+1;
max_projection=projection[i];
}
}
projection=(size_t *) RelinquishMagickMemory(projection);
/* Convert the offset (in 1/8-column units across `width` cells) to degrees;
 * negated so the affine transform below counter-rotates. */
degrees=RadiansToDegrees(-atan((double) skew/width/8));
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  Deskew angle: %g",degrees);
/*
Deskew image.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
{
char
angle[MagickPathExtent];
(void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
(void) SetImageArtifact(clone_image,"deskew:angle",angle);
}
(void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
exception);
/* Pure rotation matrix for the detected angle. */
affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
affine_matrix.tx=0.0;
affine_matrix.ty=0.0;
artifact=GetImageArtifact(image,"deskew:auto-crop");
if (IsStringTrue(artifact) == MagickFalse)
{
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
return(deskew_image);
}
/*
Auto-crop image.
*/
/* The artifact value is the border offset used to sample the background. */
GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
exception);
deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
clone_image=DestroyImage(clone_image);
if (deskew_image == (Image *) NULL)
return((Image *) NULL);
/* Median-filter a copy so noise does not inflate the bounding box. */
median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
if (median_image == (Image *) NULL)
{
deskew_image=DestroyImage(deskew_image);
return((Image *) NULL);
}
geometry=GetImageBoundingBox(median_image,exception);
median_image=DestroyImage(median_image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TransformEvent,GetMagickModule(),"  Deskew geometry: "
"%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
geometry.height,(double) geometry.x,(double) geometry.y);
crop_image=CropImage(deskew_image,&geometry,exception);
deskew_image=DestroyImage(deskew_image);
return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
switch (rotations)
{
case 0:
{
rotate_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case 2:
{
rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
break;
}
case 1:
case 3:
{
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
break;
}
}
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
if (rotations != 0)
{
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
}
switch (rotations)
{
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels-=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
register ssize_t
y;
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(rotate_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(rotate_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
register ssize_t
tile_x;
if (status == MagickFalse)
continue;
tile_x=0;
for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (y=0; y < (ssize_t) width; y++)
{
register const Quantum
*magick_restrict tile_pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
for (x=0; x < (ssize_t) height; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(rotate_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
}
tile_pixels+=width*GetPixelChannels(image);
q+=GetPixelChannels(rotate_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_IntegralRotateImage)
#endif
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
default:
break;
}
if (rotations != 0)
{
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
}
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Shear the rows of 'image' horizontally using Paeth-style area blending.
  NOTE(review): callers (ShearImage/ShearRotateImage) pass a precomputed
  tangent of the shear angle as 'degrees', not a raw angle -- confirm before
  calling directly.  Returns MagickTrue on success.
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  typedef enum
  {
    LEFT,   /* row shifts toward smaller x */
    RIGHT   /* row shifts toward larger x */
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,        /* carries the previous source pixel for area blending */
      source,
      destination;

    double
      area,         /* fractional part of the displacement */
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;         /* integer displacement, in whole pixels */

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /* Displacement grows linearly with the distance from the middle row. */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              /* Destination would fall before the row start; skip ahead. */
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          /* Blend adjacent source pixels by the fractional coverage 'area'. */
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /* Feather the trailing edge into the background color. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;  /* destination would fall past the row end */
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /* Feather the leading edge into the background color. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Shear the columns of 'image' vertically; mirror of XShearImage.
  NOTE(review): as with XShearImage, 'degrees' receives a precomputed
  tangent/sine from the callers, not a raw angle -- confirm before reuse.
  Returns MagickTrue on success.
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag  "YShear/Image"

  typedef enum
  {
    UP,     /* column shifts toward smaller y */
    DOWN    /* column shifts toward larger y */
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    double
      area,         /* fractional part of the displacement */
      displacement;

    PixelInfo
      pixel,        /* carries the previous source pixel for area blending */
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;         /* integer displacement, in whole pixels */

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    /* Displacement grows linearly with the distance from the middle column. */
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              /* Destination would fall before the column start; skip ahead. */
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          /* Blend adjacent source pixels by the fractional coverage 'area'. */
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /* Feather the trailing edge into the background color. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;  /* destination would fall past the column end */
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /* Feather the leading edge into the background color. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): progress total here is image->rows while the loop
           runs over 'width' columns -- differs from XShearImage; confirm
           this is intentional upstream. */
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotatation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Shear a clone of 'image' by x_shear/y_shear degrees: clone, pad with a
  border sized to the sheared bounds, run an X shear then a Y shear, and
  crop the result to fit.  Returns a new image, or NULL on failure.
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;      /* tangents of the two shear angles */

  RectangleInfo
    border_info,
    bounds;     /* only width, x and y are used here; height is left unset */

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Shears at multiples of 90 degrees have an infinite tangent. */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* nothing to do: return the plain clone */
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute image size.
  */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image.
  */
  if (shear_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  /* Trim the padded canvas back down to the sheared parallelogram. */
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  shear_image->alpha_trait=image->alpha_trait;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Rotate 'image' by an arbitrary angle using Paeth's three-shear technique:
  reduce the angle to (-45, 45] plus quarter-turns, do the quarter-turns
  with IntegralRotateImage, then X-shear, Y-shear, X-shear, and crop.
  Returns a new image, or NULL on failure.
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,
    *rotate_image;

  MagickBooleanType
    status;

  MagickRealType
    angle;      /* residual angle after removing 90-degree turns */

  PointInfo
    shear;      /* shear.x = -tan(angle/2), shear.y = sin(angle) */

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,  /* number of 90-degree turns (0-3) */
    shear_width,
    width;

  /*
    Adjust rotation angle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* pure quarter-turn: no shears needed */
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
    bounds.width-shear_width+2)/2.0+0.5);
  bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
  /*
    Surround image with a border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,image->compose,
    exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image.
  */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  /* Trim the padded canvas down to the rotated bounding box. */
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->alpha_trait=image->alpha_trait;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
|
graph_v4.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <omp.h>
int **vertices1;
int **vertices2;
int *adjDeg1;
int *adjDeg2;
int *visited1;
int *visited2;
int **shortestPath1;
int **shortestPath2;
int *saturation1;
int *saturation2;
int *color1;
int *color2;
int *vertexNumber1;
int *vertexNumber2;
int colorMax = 0;
/* Return a pseudo-random 0/1 coin flip, used to decide edge membership. */
int generateRandomNumber()
{
    return rand() % 2;
}
/* Print the elapsed time between two clock() readings, in seconds. */
void printTime(clock_t start, clock_t end)
{
    double seconds = (double) (end - start) / CLOCKS_PER_SEC;
    printf("Time taken: %lf seconds\n", seconds);
}
/* Allocate the global adjacency matrices, shortest-path matrices and all
   per-vertex bookkeeping arrays for graphs of number1 and number2 vertices.
   NOTE: allocation results are not checked, matching the rest of the file. */
void memoryAllocate(int number1, int number2)
{
    int n1 = number1;
    int n2 = number2;

    adjDeg1 = malloc(n1 * sizeof(int));
    adjDeg2 = malloc(n2 * sizeof(int));
    vertices1 = malloc(n1 * sizeof(int *));
    vertices2 = malloc(n2 * sizeof(int *));
    shortestPath1 = malloc(n1 * sizeof(int *));
    shortestPath2 = malloc(n2 * sizeof(int *));
    visited1 = malloc(n1 * sizeof(int));
    visited2 = malloc(n2 * sizeof(int));
    saturation1 = malloc(n1 * sizeof(int));
    saturation2 = malloc(n2 * sizeof(int));
    color1 = malloc(n1 * sizeof(int));
    vertexNumber1 = malloc(n1 * sizeof(int));
    vertexNumber2 = malloc(n2 * sizeof(int));
    color2 = malloc(n2 * sizeof(int));

    /* One row per vertex for the dense matrices. */
    for (int row = 0; row < n1; row++)
    {
        vertices1[row] = malloc(n1 * sizeof(int));
        shortestPath1[row] = malloc(n1 * sizeof(int));
    }
    for (int row = 0; row < n2; row++)
    {
        vertices2[row] = malloc(n2 * sizeof(int));
        shortestPath2[row] = malloc(n2 * sizeof(int));
    }
}
/*
  Populate graph 1's adjacency matrix with random edges.  Each unordered
  pair {i,j}, i < j, draws one coin flip and the result is mirrored so the
  matrix stays symmetric.

  Bug fix: the inner loop starts at j = i+1, so the old `if (i == j)`
  branch was unreachable and the diagonal vertices1[i][i] was never
  written -- adjacencyDegree1() then read uninitialized memory (undefined
  behavior).  The diagonal is now explicitly zeroed.
*/
void addEdge1(int numberOfVertices1)
{
    int check;
    for (int i = 0; i < numberOfVertices1; i++)
    {
        vertices1[i][i] = 0;  /* no self-loops */
        for (int j = i + 1; j < numberOfVertices1; j++)
        {
            check = generateRandomNumber();
            if (check == 1)
            {
                vertices1[i][j] = 1;
                vertices1[j][i] = 1;
            }
            else
            {
                vertices1[i][j] = 0;
                vertices1[j][i] = 0;
            }
        }
    }
}
/* Fill graph 2's adjacency matrix with random edges.  Every ordered pair
   (i,j) with i != j draws its own coin flip and writes both [i][j] and
   [j][i], so each unordered pair is decided twice and the later draw wins;
   the matrix still ends up symmetric with a zero diagonal. */
void addEdge2(int numberOfVertices2)
{
    for (int row = 0; row < numberOfVertices2; row++)
    {
        for (int col = 0; col < numberOfVertices2; col++)
        {
            if (row == col)
            {
                vertices2[row][col] = 0;  /* no self-loops */
                continue;
            }
            int flip = generateRandomNumber();
            vertices2[row][col] = flip;
            vertices2[col][row] = flip;
        }
    }
}
/* Recompute adjDeg1[v] = number of neighbours of vertex v in graph 1. */
void adjacencyDegree1(int numberOfVertices1)
{
    for (int v = 0; v < numberOfVertices1; v++)
    {
        int degree = 0;
        for (int u = 0; u < numberOfVertices1; u++)
            if (vertices1[v][u] == 1)
                degree++;
        adjDeg1[v] = degree;
    }
}
/* Recompute adjDeg2[v] = number of neighbours of vertex v in graph 2. */
void adjacencyDegree2(int numberOfVertices2)
{
    for (int v = 0; v < numberOfVertices2; v++)
    {
        int degree = 0;
        for (int u = 0; u < numberOfVertices2; u++)
            if (vertices2[v][u] == 1)
                degree++;
        adjDeg2[v] = degree;
    }
}
/* Seed graph 1's shortest-path matrix with the raw adjacency matrix
   (distance 1 for direct edges, 0 otherwise). */
void initializeShortest1(int numberOfVertices1)
{
    for (int row = 0; row < numberOfVertices1; row++)
        for (int col = 0; col < numberOfVertices1; col++)
            shortestPath1[row][col] = vertices1[row][col];
}
/* Seed graph 2's shortest-path matrix with the raw adjacency matrix
   (distance 1 for direct edges, 0 otherwise). */
void initializeShortest2(int numberOfVertices2)
{
    for (int row = 0; row < numberOfVertices2; row++)
        for (int col = 0; col < numberOfVertices2; col++)
            shortestPath2[row][col] = vertices2[row][col];
}
/*
  One relaxation pass over graph 1's shortest-path matrix: for every
  unconnected pair (i,j), find an intermediate vertex k that reaches j and
  is reachable from i, and record distance(i,k) + 1.

  Bug fix: the innermost loop was `for (k=0; j<numberOfVertices1; k++)` --
  the condition tested j instead of k, so k grew without bound and
  shortestPath1[k][j] read far past the matrix (undefined behavior).  The
  condition now bounds k itself.

  The i/j/k parameters are pure scratch counters (the caller's values are
  ignored); they are kept only to preserve the original signature.
*/
void markShortestPath1(int numberOfVertices1, int i, int j, int k)
{
    for (i = 0; i < numberOfVertices1; i++)
    {
        for (j = 0; j < numberOfVertices1; j++)
        {
            if ((i != j) && (shortestPath1[i][j] == 0))
            {
                for (k = 0; k < numberOfVertices1; k++)
                {
                    if ((shortestPath1[k][j] != 0) && (shortestPath1[i][k] != 0))
                    {
                        shortestPath1[i][j] = shortestPath1[i][k] + 1;
                        break;
                    }
                }
            }
        }
    }
}
/*
  One relaxation pass over graph 2's shortest-path matrix; mirror of
  markShortestPath1.

  Bug fix: the innermost loop condition tested j instead of k, letting k
  run past the matrix bounds (undefined behavior).  It now bounds k.
*/
void markShortestPath2(int numberOfVertices2, int i, int j, int k)
{
    for (i = 0; i < numberOfVertices2; i++)
    {
        for (j = 0; j < numberOfVertices2; j++)
        {
            if ((i != j) && (shortestPath2[i][j] == 0))
            {
                for (k = 0; k < numberOfVertices2; k++)
                {
                    if ((shortestPath2[k][j] != 0) && (shortestPath2[i][k] != 0))
                    {
                        shortestPath2[i][j] = shortestPath2[i][k] + 1;
                        break;
                    }
                }
            }
        }
    }
}
/* Rebuild both shortest-path matrices from the current adjacency matrices.
   The iterative relaxation (markShortestPath1/2) is currently disabled, so
   only direct-edge distances are populated. */
void updateShortestPath(int numberOfVertices1, int numberOfVertices2)
{
    int i, j, k;

    initializeShortest1(numberOfVertices1);
    initializeShortest2(numberOfVertices2);
    /* Disabled relaxation passes:
       for (i = 0; i < numberOfVertices1; i++)
           markShortestPath1(numberOfVertices1, i, j, k);
       for (i = 0; i < numberOfVertices2; i++)
           markShortestPath2(numberOfVertices2, i, j, k); */
    (void) i;
    (void) j;
    (void) k;
}
/* Reset per-vertex coloring state for both graphs: saturation starts at 1,
   color 0 means "uncolored", and each vertex records its own index. */
void initializeSaturation(int numberOfVertices1, int numberOfVertices2)
{
    int v;

    for (v = 0; v < numberOfVertices1; v++)
    {
        saturation1[v] = 1;
        color1[v] = 0;
        vertexNumber1[v] = v;
    }
    for (v = 0; v < numberOfVertices2; v++)
    {
        saturation2[v] = 1;
        color2[v] = 0;
        vertexNumber2[v] = v;
    }
}
/* Return the index of the first vertex with the largest adjacency degree in
   graph 1 (index 0 when every degree is zero). */
int maxAdjDeg1(int numberOfVertices1)
{
    int best = 0;
    int bestDegree = 0;

    for (int v = 0; v < numberOfVertices1; v++)
        if (adjDeg1[v] > bestDegree)
        {
            bestDegree = adjDeg1[v];
            best = v;
        }
    return best;
}
/* Return the index of the first vertex with the largest adjacency degree in
   graph 2 (index 0 when every degree is zero). */
int maxAdjDeg2(int numberOfVertices2)
{
    int best = 0;
    int bestDegree = 0;

    for (int v = 0; v < numberOfVertices2; v++)
        if (adjDeg2[v] > bestDegree)
        {
            bestDegree = adjDeg2[v];
            best = v;
        }
    return best;
}
int saturationDegree1(int numberOfVertices1)
{
int maxSatDeg = -1, vertexIndex = -1; int x;
int size = -1;
int adjDegMax = -1;
int count =0;
for(int i=0; i<numberOfVertices1; i++)
{
if(color1[i] == 0)
{
x = adjDeg1[i];
if(x == 0)
{
size = 0;
}
int colorCheck[numberOfVertices1];
for(int j=0; j<numberOfVertices1; j++)
{
if(vertices1[i][j] == 1)
{
colorCheck[j] = color1[j];
}
}
for(int j=0; j<numberOfVertices1; j++)
{
for(int k=0; k<numberOfVertices1; k++)
{
if(colorCheck[j] == k)
{
count++;
break;
}
}
}
size = count;
if(maxSatDeg < size)
{
maxSatDeg = size;
vertexIndex = i;
adjDegMax = x;
}
if((maxSatDeg == size) && (x > adjDegMax))
{
adjDegMax = x;
maxSatDeg = size;
vertexIndex = i;
}
}
}
return vertexIndex;
}
/*
  Pick the next vertex to color in graph 2; mirror of saturationDegree1.
  Returns -1 when every vertex is colored.

  Bug fixes versus the original: uninitialized colorCheck[] reads (UB) and
  a `count` accumulator that was never reset per candidate vertex.
*/
int saturationDegree2(int numberOfVertices2)
{
    int maxSatDeg = -1, vertexIndex = -1, adjDegMax = -1;

    for (int i = 0; i < numberOfVertices2; i++)
    {
        if (color2[i] != 0)
            continue;  /* already colored */
        /* seen[c] != 0 once color c has been counted for this candidate. */
        int seen[numberOfVertices2 + 1];
        for (int c = 0; c <= numberOfVertices2; c++)
            seen[c] = 0;
        int size = 0;
        for (int j = 0; j < numberOfVertices2; j++)
        {
            if (vertices2[i][j] != 1)
                continue;
            int c = color2[j];
            if ((c >= 0) && (c <= numberOfVertices2) && (seen[c] == 0))
            {
                seen[c] = 1;
                size++;
            }
        }
        int x = adjDeg2[i];
        if ((size > maxSatDeg) ||
            ((size == maxSatDeg) && (x > adjDegMax)))
        {
            maxSatDeg = size;
            adjDegMax = x;
            vertexIndex = i;
        }
    }
    return vertexIndex;
}
/*
  Assign the smallest legal color (>= 1) to 'vertexToBeColored' in graph 1 --
  the smallest color not already worn by a neighbour -- and track the highest
  color handed out so far in the global colorMax.

  Bug fixes versus the original:
    * checkColor[] was indexed up to numberOfVertices1 inclusive, one past
      the end of the array (out-of-bounds read), and entries for
      non-neighbours were never initialized (undefined behavior).
    * the scan could assign a color a neighbour already used; candidate
      colors are now tested against the full neighbour set first.
*/
void addColorToVertex1(int vertexToBeColored, int numberOfVertices1)
{
    /* used[c] != 0 when some neighbour already wears color c.  The smallest
       free color is at most degree+1 <= numberOfVertices1+1, so the array is
       sized to cover every candidate. */
    int used[numberOfVertices1 + 2];
    for (int c = 0; c < numberOfVertices1 + 2; c++)
        used[c] = 0;
    for (int j = 0; j < numberOfVertices1; j++)
        if (vertices1[vertexToBeColored][j] == 1)
        {
            int c = color1[j];
            if ((c >= 0) && (c <= numberOfVertices1 + 1))
                used[c] = 1;
        }
    for (int c = 1; c <= numberOfVertices1 + 1; c++)
        if (used[c] == 0)
        {
            color1[vertexToBeColored] = c;
            if (colorMax < c)
                colorMax = c;
            break;
        }
}
/*
  Assign the smallest legal color (>= 1) to 'vertexToBeColored' in graph 2;
  mirror of addColorToVertex1.  Updates the global colorMax.

  Bug fixes versus the original: out-of-bounds read of checkColor[n],
  uninitialized entries for non-neighbours (UB), and a scan that could pick
  a color a neighbour already used.
*/
void addColorToVertex2(int vertexToBeColored, int numberOfVertices2)
{
    /* used[c] != 0 when some neighbour already wears color c. */
    int used[numberOfVertices2 + 2];
    for (int c = 0; c < numberOfVertices2 + 2; c++)
        used[c] = 0;
    for (int j = 0; j < numberOfVertices2; j++)
        if (vertices2[vertexToBeColored][j] == 1)
        {
            int c = color2[j];
            if ((c >= 0) && (c <= numberOfVertices2 + 1))
                used[c] = 1;
        }
    for (int c = 1; c <= numberOfVertices2 + 1; c++)
        if (used[c] == 0)
        {
            color2[vertexToBeColored] = c;
            if (colorMax < c)
                colorMax = c;
            break;
        }
}
void colorVertex1(int numberOfVertices1)
{
int first;
first = maxAdjDeg1(numberOfVertices1);
color1[first] =1;
int vertexToBeColored;
vertexToBeColored = saturationDegree1(numberOfVertices1);
while(vertexToBeColored != -1)
{
addColorToVertex1(vertexToBeColored,numberOfVertices1); // calls a function to add color to the other vertices
vertexToBeColored = saturationDegree1(numberOfVertices1);
}
}
void colorVertex2(int numberOfVertices2)
{
int first;
first = maxAdjDeg2(numberOfVertices2);
color1[first] =1;
int vertexToBeColored;
vertexToBeColored = saturationDegree2(numberOfVertices2);
while(vertexToBeColored != -1)
{
addColorToVertex2(vertexToBeColored,numberOfVertices2); // calls a function to add color to the other vertices
vertexToBeColored = saturationDegree2(numberOfVertices2);
}
}
/* Reset both graphs' visited flags before a new matching pass. */
void initializeVisited(int numberOfVertices1, int numberOfVertices2)
{
    int v;
    for (v = 0; v < numberOfVertices1; v++)
        visited1[v] = 0;
    for (v = 0; v < numberOfVertices2; v++)
        visited2[v] = 0;
}
/* Print graph 1's adjacency matrix, tab separated, one row per line. */
void printGraph1(int numberOfVertices1)
{
    printf("\n");
    for (int row = 0; row < numberOfVertices1; row++)
    {
        for (int col = 0; col < numberOfVertices1; col++)
            printf("%d\t", vertices1[row][col]);
        printf("\n");
    }
}
/* Print graph 2's adjacency matrix, tab separated, one row per line. */
void printGraph2(int numberOfVertices2)
{
    printf("\n");
    for (int row = 0; row < numberOfVertices2; row++)
    {
        for (int col = 0; col < numberOfVertices2; col++)
            printf("%d\t", vertices2[row][col]);
        printf("\n");
    }
}
/* Print the color assigned to every vertex of graph 1, then graph 2. */
void printColor(int numberOfVertices1, int numberOfVertices2)
{
    printf("\n");
    for (int v = 0; v < numberOfVertices1; v++)
        printf("%d\t", color1[v]);
    printf("\n");
    for (int v = 0; v < numberOfVertices2; v++)
        printf("%d\t", color2[v]);
}
/*
 * Build both graphs and run the full coloring pipeline.
 * The call order matters: edges must exist before degrees are computed,
 * and visited/saturation tables must be initialized before coloring.
 */
void generateGraph(int numberOfVertices1, int numberOfVertices2)
{
    addEdge1(numberOfVertices1);
    addEdge2(numberOfVertices2);
    adjacencyDegree1(numberOfVertices1);
    adjacencyDegree2(numberOfVertices2);
    initializeVisited(numberOfVertices1,numberOfVertices2);
    updateShortestPath(numberOfVertices1,numberOfVertices2);
    initializeSaturation(numberOfVertices1,numberOfVertices2);
    colorVertex1(numberOfVertices1);
    colorVertex2(numberOfVertices2);
    /* Debug output, left disabled: */
    //printColor(numberOfVertices1, numberOfVertices2);
    //printGraph1(numberOfVertices1);
    //printGraph2(numberOfVertices2);
}
/*
 * Greedy row-matching isomorphism test: for every vertex i of graph 1, try
 * to find an unvisited vertex j of graph 2 with the same adjacency degree
 * and an identical adjacency row.  Returns 1 if every vertex of graph 2
 * gets matched, 0 otherwise.
 *
 * Bug fixes vs. the original:
 *  - "#pragma omp parallel" without "for" made every thread execute the
 *    whole i loop redundantly; "parallel for" actually splits the work;
 *  - "count" was not reset for each candidate j, so a non-matching j could
 *    inherit a full count from an earlier match and be marked visited;
 *  - the match test now sits inside the degree/visited guard, and the
 *    visited2[j] store is atomic to avoid a torn write between threads.
 */
int checkIsomorphism(int numberOfVertices1, int numberOfVertices2, int numberThreads)
{
    omp_set_num_threads(numberThreads);
#pragma omp parallel for schedule(static)
    for (int i = 0; i < numberOfVertices1; i++)
    {
        int checkDeg = adjDeg1[i];
        for (int j = 0; j < numberOfVertices2; j++)
        {
            if ((adjDeg2[j] == checkDeg) && (visited2[j] == 0))
            {
                int count = 0;
                for (int k = 0; k < numberOfVertices2; k++)
                {
                    if (vertices1[i][k] == vertices2[j][k])
                    {
                        count++;
                    }
                }
                if (count == numberOfVertices2)
                {
#pragma omp atomic write
                    visited2[j] = 1;
                }
            }
        }
    }
    for (int i = 0; i < numberOfVertices2; i++)
    {
        if (visited2[i] == 0)
        {
            return 0;
        }
    }
    return 1;
}
/*
 * Entry point: argv[1] gives the OpenMP thread count.  Builds two
 * 2000-vertex graphs, colors them, and reports whether the greedy
 * row-matching test considers them isomorphic.
 */
int main(int argc, char *argv[])
{
    int numberThreads;
    if (argc != 2)
    {
        printf(" \n Enter number of threads to be used in command line argument ");
        exit(1);
    }
    numberThreads = atoi(argv[1]);
    if (numberThreads < 1)
    {
        /* Message fixed: the guard rejects values below 1, i.e. one or
           more threads are accepted (the old text said "more than 1"). */
        printf(" \n Number of threads specified should be at least 1 ");
        exit(1);
    }
    int number1 = 2000, number2 = 2000; /* vertex counts for both graphs */
    int check;
    clock_t startTime, endTime;
    /*printf("\nEnter number of vertices for graph 1\n");
    scanf("%d",&number1);
    printf("\nEnter number of vertices for graph 2\n");
    scanf("%d",&number2); */
    memoryAllocate(number1, number2);
    /* NOTE(review): clock() sums CPU time across threads, so the printed
       time overstates wall time for the parallel phase - confirm intent. */
    startTime = clock();
    generateGraph(number1, number2);
    check = checkIsomorphism(number1, number2, numberThreads);
    if (check == 0)
    {
        printf("\n Not IsoMorphic \n ");
    }
    else
        printf("\n IsoMorphic \n");
    endTime = clock();
    printTime(startTime, endTime);
    return 0;
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config *gbdt_config, const Dataset *train_data,
const ObjectiveFunction *objective_function,
const std::vector<const Metric *> &training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting *other) override {
auto other_gbdt = reinterpret_cast<const GBDT *>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto &tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto &tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
  /*!
  * \brief Randomly permute whole boosting iterations (groups of
  *        num_tree_per_iteration_ trees) within [start_iter, end_iter).
  *        end_iter <= 0 means "to the end"; bounds are clamped to the model.
  */
  void ShuffleModels(int start_iter, int end_iter) override {
    int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iter = std::max(0, start_iter);
    if (end_iter <= 0) {
      end_iter = total_iter;
    }
    end_iter = std::min(total_iter, end_iter);
    auto original_models = std::move(models_);
    // Identity permutation over all iterations; only the requested window
    // is shuffled below, the rest keeps its original position.
    std::vector<int> indices(total_iter);
    for (int i = 0; i < total_iter; ++i) {
      indices[i] = i;
    }
    // Fisher-Yates-style shuffle with a fixed seed, so the permutation is
    // reproducible across runs.
    Random tmp_rand(17);
    for (int i = start_iter; i < end_iter - 1; ++i) {
      int j = tmp_rand.NextShort(i + 1, end_iter);
      std::swap(indices[i], indices[j]);
    }
    // Rebuild models_ as deep copies in the permuted iteration order.
    models_ = std::vector<std::unique_ptr<Tree>>();
    for (int i = 0; i < total_iter; ++i) {
      for (int j = 0; j < num_tree_per_iteration_; ++j) {
        int tree_idx = indices[i] * num_tree_per_iteration_ + j;
        auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
        models_.push_back(std::move(new_tree));
      }
    }
  }
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset *train_data, const ObjectiveFunction *objective_function,
const std::vector<const Metric *> &training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config *gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset *valid_data,
const std::vector<const Metric *> &valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string &model_output_path) override;
void RefitTree(const std::vector<std::vector<int>> &tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t *gradients, const score_t *hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double *GetTrainingScore(int64_t *out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double *out_result, int64_t *out_len) override;
/*!
* \brief Get number of prediction for one data
* \param start_iteration Start index of the iteration to predict
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf,
bool is_pred_contrib) const override {
int num_pred_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, max_iteration);
if (num_iteration > 0) {
num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
} else {
num_pred_in_one_row *= (max_iteration - start_iteration);
}
} else if (is_pred_contrib) {
num_pred_in_one_row =
num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_pred_in_one_row;
}
void PredictRaw(const double *features, double *output,
const PredictionEarlyStopInstance *earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double> &features, double *output,
const PredictionEarlyStopInstance *early_stop) const override;
void Predict(const double *features, double *output,
const PredictionEarlyStopInstance *earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double> &features, double *output,
const PredictionEarlyStopInstance *early_stop) const override;
void PredictLeafIndex(const double *features, double *output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double> &features, double *output) const override;
void PredictContrib(const double *features, double *output) const override;
void PredictContribByMap(const std::unordered_map<int, double> &features,
std::vector<std::unordered_map<int, double>> *output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char *filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char *filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string
SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char *buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
  /*!
  * \brief Prepare the prediction window: clamp start_iteration into the
  *        stored model and derive how many iterations will be used.
  * \param start_iteration First iteration to use (clamped to [0, total])
  * \param num_iteration How many iterations to use; <= 0 means "all from start"
  * \param is_pred_contrib True when SHAP-style contributions will be predicted
  */
  inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
    // Total number of iterations currently stored in the model.
    num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
    start_iteration = std::max(start_iteration, 0);
    start_iteration = std::min(start_iteration, num_iteration_for_pred_);
    if (num_iteration > 0) {
      // Use at most num_iteration iterations counted from start_iteration.
      num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
    } else {
      // Non-positive request: use everything from start_iteration onward.
      num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
    }
    start_iteration_for_pred_ = start_iteration;
    if (is_pred_contrib) {
      // Contribution prediction needs each tree's max depth precomputed.
      #pragma omp parallel for schedule(static)
      for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
        models_[i]->RecomputeMaxDepth();
      }
    }
  }
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char *SubModelName() const override { return "tree"; }
bool IsLinear() const override { return linear_tree_; }
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction *objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config *config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t *buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t *buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree *tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric *metric, const double *score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset *train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction *objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric *> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric *>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*!
* potential trees
* First index means the index of tree
* Second index means the index of node
* Thrid index means the index of potential tree pointer
*/
std::vector<std::vector<std::vector<Tree>>> potential_models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
GB_unaryop__identity_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_uint32
// op(A') function: GB_tran__identity_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator elementwise, casting
// each uint32_t entry of Ax to int64_t.  Safe when Cx aliases Ax because
// each iteration touches only index p.
GrB_Info GB_unop__identity_int64_uint32
(
    int64_t *Cx,        // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Embarrassingly parallel: one independent cast-and-store per entry.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting uint32_t entries to int64_t,
// via the shared transpose template.
GrB_Info GB_tran__identity_int64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The loop body is generated by textual inclusion; the macros defined
    // above (GB_GETA / GB_CASTING / GB_OP) supply the per-element operation.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
jacobi_omp.c | /*
* Copyright (c) 2008, BSC (Barcelon Supercomputing Center)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <time.h>
#define NB 128
#define B 64
#define FALSE (0)
#define TRUE (1)
typedef double fp_type;
typedef fp_type *vin;
typedef fp_type *vout;
typedef fp_type *bin;
typedef fp_type *binout;
fp_type *A[NB][NB];
fp_type *A_new[NB][NB];
fp_type *tmp[NB][NB];
/*
 * Allocate the NB x NB grid of BxB blocks for A, A_new and tmp, and fill
 * A (and A_new identically) with a reproducible pseudo-random pattern
 * from a multiplicative congruential sequence.  Exits on allocation failure.
 */
void alloc_and_genmat()
{
    int init_val, i, j, ii, jj;
    fp_type *p, *p_new;
    init_val = 1325;
    for (ii = 0; ii < NB; ii++)
    {
        for (jj = 0; jj < NB; jj++)
        {
            A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
            A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
            /* NOTE(review): tmp is allocated here but never read or freed
               anywhere in this file - confirm it is still needed. */
            tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
            if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL)
            {
                printf("Out of memory\n");
                exit(1);
            }
            p = A[ii][jj];
            p_new = A_new[ii][jj];
            for (i = 0; i < B; i++)
            {
                for (j = 0; j < B; j++)
                {
                    /* x := 3125*x mod 65536, mapped to roughly [-2, 2). */
                    init_val = (3125 * init_val) % 65536;
                    (*p) = (fp_type)((init_val - 32768.0) / 16384.0);
                    (*p_new) = (*p);
                    p++;
                    p_new++;
                }
            }
        }
    }
}
/* Wall-clock time in microseconds since the epoch. */
long usecs(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
/* Zero-fill a halo vector of length B.  (Unused locals j, k removed.) */
void clear(vout v)
{
    int i;
    for (i = 0; i < B; i++)
        v[i] = (fp_type)0.0;
}
/* Copy the last row (row B-1) of block A into v; indexed by column. */
void getlastrow(bin A, vout v)
{
    for (int col = 0; col < B; col++)
        v[col] = A[(B - 1) * B + col];
}
/* Copy the last column (column B-1) of block A into v; indexed by row. */
void getlastcol(bin A, vout v)
{
    for (int row = 0; row < B; row++)
        v[row] = A[row * B + (B - 1)];
}
/* Copy the first row (row 0) of block A into v; indexed by column. */
void getfirstrow(bin A, vout v)
{
    for (int col = 0; col < B; col++)
        v[col] = A[col];
}
/* Copy the first column (column 0) of block A into v; indexed by row. */
void getfirstcol(bin A, vout v)
{
    for (int row = 0; row < B; row++)
        v[row] = A[row * B];
}
/*
 * One Jacobi sweep over a BxB block: A_new[i][j] becomes the average of the
 * cell and its four neighbours, reading halos at the block edges.
 *
 * Bug fixes vs. the original halo indexing:
 *  - lefthalo is a column (from getlastcol) -> index by row i, not j;
 *  - tophalo is a row (from getlastrow)     -> index by column j, not i;
 *  - bottomhalo is a row (from getfirstrow) -> index by column j, not i.
 * The unused local "tmp" was removed.
 */
void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new)
{
    int i, j;
    fp_type left, top, right, bottom;
    for (i = 0; i < B; i++)
    {
        for (j = 0; j < B; j++)
        {
            left = (j == 0 ? lefthalo[i] : A[i * B + j - 1]);
            top = (i == 0 ? tophalo[j] : A[(i - 1) * B + j]);
            right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]);
            bottom = (i == B - 1 ? bottomhalo[j] : A[(i + 1) * B + j]);
            /* Five-point average including the centre cell. */
            A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom);
        }
    }
}
/*
 * Maximum absolute per-element difference between A_new and A across all
 * blocks.  Loop variables are declared inside the loops so each level is
 * private to its thread; the original jj/i/j were declared outside the
 * parallel region and therefore shared, which is a data race under
 * "#pragma omp parallel for".
 */
double maxdelta()
{
    double dmax = -__DBL_MAX__;
#pragma omp parallel for schedule(static) reduction(max : dmax)
    for (int ii = 0; ii < NB; ii++)
    {
        for (int jj = 0; jj < NB; jj++)
        {
            for (int i = 0; i < B; i++)
            {
                for (int j = 0; j < B; j++)
                {
                    double diff = fabs(A_new[ii][jj][i * B + j] - A[ii][jj][i * B + j]);
                    if (diff > dmax)
                        dmax = diff;
                }
            }
        }
    }
    return dmax;
}
/*
 * Run niters Jacobi iterations over the whole NB x NB block grid.
 * Each iteration: gather halos per block, sweep, report the max delta,
 * then copy A_new back into A.
 *
 * Bug fix: the jj < NB-1 else-branch cleared lefthalo a second time
 * instead of righthalo, so blocks on the right boundary used a stale
 * right halo.  The unused local "epsilon" was removed (no convergence
 * cut-off is currently applied).
 */
void compute(int niters)
{
    int iters;
    int ii, jj;
    fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B];
    double delta = 2.0;
    iters = 0;
    while (iters < niters)
    {
        ++iters;
#pragma omp parallel \
    private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \
    shared(A, A_new)
        {
#pragma omp for schedule(static)
            for (ii = 0; ii < NB; ii++)
            {
                for (jj = 0; jj < NB; jj++)
                {
                    if (ii > 0)
                        getlastrow(A[ii - 1][jj], tophalo);
                    else
                        clear(tophalo);
                    if (jj > 0)
                        getlastcol(A[ii][jj - 1], lefthalo);
                    else
                        clear(lefthalo);
                    if (ii < NB - 1)
                        getfirstrow(A[ii + 1][jj], bottomhalo);
                    else
                        clear(bottomhalo);
                    if (jj < NB - 1)
                        getfirstcol(A[ii][jj + 1], righthalo);
                    else
                        clear(righthalo); /* bug fix: was clear(lefthalo) */
                    jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]);
                } // jj
            } // ii
        } // end parallel
        delta = maxdelta();
        printf("iteration %d: delta = %e\n", iters, delta);
        // yes, this is an inefficient copy
        // however, the library version requires you to do a copy in this way
        // on all of the component parts to avoid segmentation fault
#pragma omp parallel for schedule(static) shared(A, A_new)
        for (int i = 0; i < NB; ++i)
        {
            for (int j = 0; j < NB; ++j)
            {
                for (int k = 0; k < B; ++k)
                    for (int l = 0; l < B; ++l)
                        A[i][j][k * B + l] = A_new[i][j][k * B + l];
            }
        }
    } // iter
}
/* Driver: optional argv[1] gives the iteration count (default 1). */
int main(int argc, char *argv[])
{
    int niters;
    // pp_time_t tm;
    // memset( &tm, 0, sizeof(tm) );
    struct timespec start, end;
    if (argc > 1)
    {
        niters = atoi(argv[1]);
    }
    else
        niters = 1;
    alloc_and_genmat();
    /* CLOCK_MONOTONIC measures elapsed wall time, which is the right
       metric for the OpenMP region (clock() would sum CPU time over
       all threads). */
    clock_gettime(CLOCK_MONOTONIC, &start);
    compute(niters);
    clock_gettime(CLOCK_MONOTONIC, &end);
    /* Convert the two timespecs to seconds via nanoseconds. */
    double time_taken = (end.tv_sec - start.tv_sec) * 1e9;
    time_taken = (time_taken + (end.tv_nsec - start.tv_nsec)) * 1e-9;
    printf("Running time = %g %s\n", time_taken, "s");
    /* FILE *outFile;
    outFile = fopen("./jacobi_omp_values.txt", "w");
    if (outFile == NULL)
    {
    fprintf(stderr, "Error writing to file\n");
    }
    else
    {
    int ii, jj, i, j;
    for (ii = 0; ii < NB; ++ii)
    for (jj = 0; jj < NB; ++jj)
    for (i = 0; i < B; ++i)
    for (j = 0; j < B; ++j)
    fprintf(outFile, "%.15f\n", A[ii][jj][i * B + j]);
    fclose(outFile);
    } */
    return 0;
} |
qdamc.c | /*
* Monte Carlo simulation, first draft from Matlab source
*
* we use NAN to represent the sqrt(-1) flag present in the matlab QDAMC code
*
* Created on May 26, 2011, 10:51 AM
*/
#include "qdamc.h"
#include "qdamc_utils.h"
#include "../ecodes.h"
#include "../dSFMT-2.1/dSFMT.h"
#ifdef DO_PAPI
#include "papi.h"
#endif
#ifdef DO_OMP
#include "omp.h"
#endif
#ifdef DO_MPI
#include "mpi.h"
#endif
#define CACHE_LINE_SIZE 64
/* Main Function */
int run(int rank) {
//stores user input
long long NP = opt.NP; // Number of particles
pfloat sysLeng = opt.sysLen; // Length of system
int numCells = opt.numCells; // Number of cells in system
pfloat tol_std = opt.tol_std; // Tolerance for standard deviation calculations
int runningWidth = opt.runWidth; // Number of iterations to skip before starting averaging
long long NPc = (long long)((pfloat)NP / numCells); // number of particles in cell
long long NP_tot = NP; //total number of particles for the QDAMC
int nprocs;
#ifdef DO_MPI
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
#else
nprocs = 1;
#endif
//Initialization of vital simulation parameters
pfloat dx = sysLeng / numCells; //cell width
pfloat eps = 0.001; //the threshold angle for tallying
pfloat sig_t = opt.sig_t; //Total cross-section
pfloat sig_s = opt.sig_s; //Scattering cross-section
pfloat D = 1.0 / (3 * sig_t ); //diffusion coefficient
//The source term uniform for now
pfloat * q0 = NULL;
int i;
if( rank == 0 ) {
q0 = (pfloat *) malloc(sizeof (pfloat) * numCells);
for (i = 0; i < numCells; i++) {
q0[i] = 1.0f;
}
}
pfloat q0_lo = 1.0f;
//Form data structure for simulation parameters
struct data data;
data.NP = NP; //Number of Particles
data.lx = sysLeng; //Length of System
data.nx = numCells; //Number of Cells
data.dx = dx; //Cell Width
data.dx2 = dx*dx; // Cell Width SQUARED
data.dx_recip = 1.0/dx; // Pre-computed reciprocal
data.sig_t = sig_t; //Total Collision Cross-Section
data.sig_t_recip = 1.0/sig_t; // Pre-computed reciprocal
data.sig_s = sig_s; //Scattering Collision Cross-Section
data.q0 = q0; //Source term per cell ( Uniform )
data.eps = eps; //Threshold Angle for Tallying
data.q0_lo = q0_lo; //Source Term
data.D = D; //Diffusion Coefficient
data.NP_tot = NP_tot; // Total Number of Particles
data.NPc = NPc; // Number of Particles per Cell
//Initialize the average values
int iter = 0; //tally for iteration for convergence
int iter_avg; // iteration for averaging
if( runningWidth == 0 )
iter_avg = 0;
else
iter_avg = 1;
afloat phi_left_tot = 0.0; //tally for average phi on left wall
afloat phi_right_tot = 0.0; //tally for average phi on right wall
afloat J_left_tot = 0.0; //tally for average J on left wall
afloat J_right_tot = 0.0; //tally for average J on right wall
afloat E_left_tot = 0.0; //tally for average E on left wall
afloat E_right_tot = 0.0; //tally for average E on right wall
// Tally for averaging the parameters above
pfloat phi_left_avg = 0.0;
pfloat phi_right_avg = 0.0;
pfloat J_left_avg = 0.0;
pfloat J_right_avg = 0.0;
pfloat E_left_avg = 0.0;
pfloat E_right_avg = 0.0;
//time keeping structures
struct timeval start_time, end_time, startPerIter, endPerIter;
afloat * phi_n_tot = NULL; //tally of phi from the higher order
afloat * phiS2_n_tot = NULL;
afloat * phi_lo_tot = NULL; //tally of phi from the LO solver
afloat * E_n_tot = NULL; //Tally of eddington tensor from higher order
pfloat * phi_n_avg = NULL; //Average of phi from higher order
pfloat * E_n_avg = NULL; //Average of eddington tensor from higher order
pfloat * phi_lo_avg = NULL; //Average of eddington tensor from higher order
pfloat * E_ho_n = NULL; //Values of eddington tensor from one iteration
pfloat * phi_lo = NULL; //Values of phi returned by lower order solver
triMat A_lo; //Data structure to hold the tri-daiagonal matrix of the lower order sovler
pfloat * b_lo = NULL; //Array to hold the right hand side of the lower order sovler
pfloat * Qbar = NULL; //Input to the higher order system
#if defined(L2_1) || defined(L2_2) || defined(L2_3)
pfloat * anal_soln = NULL; //Array to hold the analytical solution
pfloat l2;
#else
meanDev * phi_nStats = NULL; //Values returned from mean and std dev calculations
unsigned long samp_cnt = 0; //total history tally
#endif
//ALlocate data structure for averaging on process 0 only
if(rank == 0){
phi_n_tot = (afloat *) calloc(numCells, sizeof (afloat));
phiS2_n_tot = (afloat*) calloc(numCells, sizeof(afloat));
phi_lo_tot = (afloat *) calloc(numCells, sizeof (afloat));
E_n_tot = (afloat *) calloc(numCells, sizeof (afloat));
phi_n_avg = (pfloat *) calloc(numCells, sizeof (pfloat));
E_n_avg = (pfloat *) calloc(numCells, sizeof (pfloat));
phi_lo_avg = (pfloat *) calloc(numCells, sizeof (pfloat));
// Initialize lower order scalar flux using diffusion approximation
E_ho_n = (pfloat *) malloc(sizeof (pfloat) * numCells);
for (i = 0; i < numCells; i++) {
E_ho_n[i] = 1.0f / 3.0f;
}
phi_lo = (pfloat*) malloc(data.nx*sizeof(pfloat));
A_lo.a = (pfloat*) malloc(sizeof(pfloat)*(data.nx-1));
A_lo.b = (pfloat*) malloc(sizeof(pfloat)*data.nx);
A_lo.c = (pfloat*) malloc(sizeof(pfloat)*(data.nx-1));
b_lo = (pfloat*) malloc(sizeof(pfloat)*data.nx);
#if defined(L2_1) || defined(L2_2) || defined(L3_2)
anal_soln = (pfloat *) calloc(numCells, sizeof(pfloat));
#if defined(L2_1)
init_L2_norm_1(anal_soln);
#elif defined(L2_2)
init_L2_norm_2(anal_soln);
#elif defined(L2_3)
init_L2_norm_3(anal_soln);
#endif
#else
phi_nStats = (meanDev*) malloc(sizeof(meanDev)*opt.numCells);
#endif
}
/************************************************
* Calculation of Initial Condition
* TODO account for isotropic and beam source
*************************************************/
int flag_sim = 0;
Qbar = malloc(sizeof (pfloat) * numCells);
//Tallies per threads
colData * thread_tallies;
#ifdef DO_OMP
int max_threads = omp_get_max_threads();
#else
int max_threads = 1;
#endif
thread_tallies = (colData*) malloc(max_threads*sizeof(colData));
//Structure to hold the random number generator data structures for each thread
dsfmt_t** thread_rand;
thread_rand = (dsfmt_t**)calloc(max_threads,sizeof(dsfmt_t*));
//Allocating data structures to be used by each thread
for( i = 0 ; i < max_threads ; i++ ) {
//Allocate all data that is written by the thread to cache line size
posix_memalign( (void**)&(thread_tallies[i].phi_n), CACHE_LINE_SIZE, numCells*sizeof(afloat));
posix_memalign( (void**)&(thread_tallies[i].phi_n2), CACHE_LINE_SIZE, numCells*sizeof(afloat));
posix_memalign( (void**)&(thread_tallies[i].E_n), CACHE_LINE_SIZE, numCells*sizeof(afloat));
posix_memalign( (void**)(thread_rand+i), CACHE_LINE_SIZE, sizeof(dsfmt_t));
}
//Tallies across all processes.
colData * all_tallies;
//For MPI, allocate the finall tally on process 0
#ifdef DO_MPI
all_tallies = (colData*) malloc(sizeof(colData));
if( rank == 0 ) {
all_tallies->phi_n = (afloat*) malloc(numCells*sizeof(afloat));
all_tallies->phi_n2 = (afloat*) malloc(numCells*sizeof(afloat));
all_tallies->E_n = (afloat*) malloc(numCells*sizeof(afloat));
}
else{
all_tallies->phi_n = NULL;
all_tallies->phi_n2 = NULL;
all_tallies->E_n = NULL;
}
#else
//Otherwise alias all_tallies to tallies of thread 0.
all_tallies = &(thread_tallies[0]);
#endif
//Calculation starts here
//START TIMING
gettimeofday(&start_time, NULL);
if( rank == 0){
//Plot and print initial condition as well as simulation parameters
if (!opt.silent) sim_param_print(data);
//calls low order solver for initial condition
lo_solver(data, 0.0, 0.0, 0.0, 0.0, 1.0f / 3.0f, 1.0f / 3.0f, E_ho_n, 1,phi_lo,A_lo,b_lo);
free(E_ho_n);
}
#ifdef DO_OMP
#pragma omp parallel default(shared) private(iter,i)
{
int nthreads = omp_get_num_threads();
#else
int nthreads = 1;
#endif
iter = 0; //tally for iteration for convergence
int nstages = 0;
i = 1;
while( i < nthreads )
{
nstages++;
i <<= 1 ;
}
#ifdef DO_OMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
// MT RNG
dsfmt_init_gen_rand(thread_rand[thread_id], (int)time(NULL) + thread_id);
#ifdef DO_PAPI
PAPI_library_init(PAPI_VER_CURRENT);
int EventSet = PAPI_NULL;
long long start[4],stop[4];
PAPI_create_eventset(&EventSet);
PAPI_add_event(EventSet,PAPI_TOT_CYC);
PAPI_add_event(EventSet,PAPI_TOT_INS);
PAPI_add_event(EventSet,PAPI_FP_OPS);
PAPI_add_event(EventSet,PAPI_FP_INS);
PAPI_start(EventSet);
PAPI_read(EventSet,start);
#endif
// while not converged and total number of iterations < opt.numiters
while (flag_sim == 0 && iter < opt.numIters) {
//start time for iteration
gettimeofday(&startPerIter, NULL);
iter += 1;
if(!opt.silent && rank == 0 && thread_id == 0 ) printf("This is the %dth iteration\n",iter);
if( rank == 0 && thread_id == 0 ){
if( iter == 1 )
for (i = 0; i < numCells; i++) {
Qbar[i] = fabs(q0[i] + sig_s * phi_lo[i]) * data.lx;
}
else
for (i = 0; i < numCells; i++) {
Qbar[i] = fabs(q0[i] + sig_s * phi_lo_avg[i]) * data.lx;
}
}
#ifdef DO_MPI
if( thread_id == 0 ){
MPI_Bcast((void*)Qbar,numCells,MPI_DOUBLE,0,MPI_COMM_WORLD);
}
#endif
#ifdef DO_OMP
#pragma omp barrier
#endif
//Simulate the Higher Order System using Monte Carlo
collision_and_tally(data, Qbar,thread_tallies+ thread_id,thread_id,nthreads,thread_rand[thread_id],rank,nprocs);
#ifdef DO_OMP
//COMBINE TALLIES HERE - TREE REDUCTION
int factor = 1;
int k;
for( i = 0 ; i < nstages ; i++ )
{
#pragma omp barrier
if( ( (thread_id % (factor << 1) ) == 0 ) && ( thread_id + factor < nthreads ) )
{
for( k = 0 ; k < numCells ; k++ )
{
thread_tallies[thread_id].phi_n[k] += thread_tallies[thread_id+factor].phi_n[k];
thread_tallies[thread_id].phi_n2[k] += thread_tallies[thread_id+factor].phi_n2[k];
thread_tallies[thread_id].E_n[k] += thread_tallies[thread_id+factor].E_n[k];
}
thread_tallies[thread_id].phi_left += thread_tallies[thread_id+factor].phi_left;
thread_tallies[thread_id].phi_right += thread_tallies[thread_id+factor].phi_right;
thread_tallies[thread_id].J_left += thread_tallies[thread_id+factor].J_left;
thread_tallies[thread_id].J_right += thread_tallies[thread_id+factor].J_right;
thread_tallies[thread_id].E_left += thread_tallies[thread_id+factor].E_left;
thread_tallies[thread_id].E_right += thread_tallies[thread_id+factor].E_right;
}
factor <<= 1;
}
#endif
#ifdef DO_MPI
//AFTER THREAD reduction, reduction across MPI processes
if( thread_id == 0 )
{
MPI_Reduce((void*)thread_tallies[0].phi_n,(void*)(all_tallies->phi_n),numCells,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)thread_tallies[0].phi_n2,(void*)(all_tallies->phi_n2),numCells,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)thread_tallies[0].E_n,(void*)(all_tallies->E_n),numCells,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)&(thread_tallies[0].phi_left),(void*)&(all_tallies->phi_left),1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)&(thread_tallies[0].phi_right),(void*)&(all_tallies->phi_right),1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)&(thread_tallies[0].J_left),(void*)&(all_tallies->J_left),1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)&(thread_tallies[0].J_right),(void*)&(all_tallies->J_right),1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)&(thread_tallies[0].E_left),(void*)&(all_tallies->E_left),1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce((void*)&(thread_tallies[0].E_right),(void*)&(all_tallies->E_right),1,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
}
#endif
if( rank == 0 && thread_id == 0 ){
//Normalize all the tallied data
if (!floatEquals(all_tallies->phi_left, 0.0, FLOAT_TOL)) {
all_tallies->E_left /= all_tallies->phi_left;
} else {
all_tallies->E_left = (1.0 / 3.0);
}
if (!floatEquals(all_tallies->phi_right, 0.0, FLOAT_TOL)) {
all_tallies->E_right /= all_tallies->phi_right;
} else {
all_tallies->E_right = (1.0 / 3.0);
}
all_tallies->phi_left /= (afloat)NP;
all_tallies->J_left /= (afloat)NP;
all_tallies->phi_right /= (afloat)NP;
all_tallies->J_right /= (afloat)NP;
for (i = 0; i < numCells; i++) {
if (!floatEquals(all_tallies->phi_n[i], 0.0, FLOAT_TOL)) {
all_tallies->E_n[i] /= all_tallies->phi_n[i];
} else {
all_tallies->E_n[i] = (1.0 / 3.0);
}
all_tallies->phi_n[i] /= (afloat)(dx*NP);
}
/***************************************************
* Calculates the averages
**************************************************/
if( iter <= runningWidth )
{
for (i = 0; i < numCells; i++) {
phi_n_tot[i] = all_tallies->phi_n[i];
phiS2_n_tot[i] = all_tallies->phi_n2[i];
phi_lo_tot[i] = phi_lo[i];
E_n_tot[i] = all_tallies->E_n[i];
}
phi_left_tot = all_tallies->phi_left;
phi_right_tot = all_tallies->phi_right;
E_left_tot = all_tallies->E_left;
E_right_tot = all_tallies->E_right;
J_left_tot = all_tallies->J_left;
J_right_tot = all_tallies->J_right;
// for each cell, calculate the average for phi_n, phi_lo and E_n
for (i = 0; i < numCells; i++) {
phi_n_avg[i] = phi_n_tot[i] ;
phi_lo_avg[i] = phi_lo_tot[i];
E_n_avg[i] = E_n_tot[i] ;
}
// calculate the average for left and right faces of phi, E and J
phi_left_avg = phi_left_tot;
phi_right_avg = phi_right_tot;
E_left_avg = E_left_tot ;
E_right_avg = E_right_tot ;
J_left_avg = J_left_tot ;
J_right_avg = J_right_tot;
}
else{
iter_avg += 1;
//accumulate phase -- add new data to the running averages
for (i = 0; i < numCells; i++) {
phi_n_tot[i] += all_tallies->phi_n[i];
phiS2_n_tot[i] += all_tallies->phi_n2[i];
phi_lo_tot[i] += phi_lo[i];
E_n_tot[i] += all_tallies->E_n[i];
}
phi_left_tot += all_tallies->phi_left;
phi_right_tot += all_tallies->phi_right;
E_left_tot += all_tallies->E_left;
E_right_tot += all_tallies->E_right;
J_left_tot += all_tallies->J_left;
J_right_tot += all_tallies->J_right;
// for each cell, calculate the average for phi_n, phi_lo and E_n
for (i = 0; i < numCells; i++) {
phi_n_avg[i] = phi_n_tot[i] / iter_avg;
phi_lo_avg[i] = phi_lo_tot[i] / iter_avg;
E_n_avg[i] = E_n_tot[i] / iter_avg;
}
// calculate the average for left and right faces of phi, E and J
phi_left_avg = phi_left_tot / iter_avg;
phi_right_avg = phi_right_tot / iter_avg;
E_left_avg = E_left_tot / iter_avg;
E_right_avg = E_right_tot / iter_avg;
J_left_avg = J_left_tot / iter_avg;
J_right_avg = J_right_tot / iter_avg;
//check for convergence
#if !defined(L2_1) && !defined(L2_2) && !defined(L2_3)
// for each cell, do adds for phi_ho_s1_n and phi_ho_s2_n
samp_cnt = NP_tot * ((long) iter_avg);
mean_std_calc(phi_n_tot, phiS2_n_tot, samp_cnt, opt.NP, opt.numCells, opt.dx, phi_nStats);
printf("The maximum standard deviation of flux at node is, max (sig_phi) = %f\n", maxFAS(&phi_nStats[0].stdDev, numCells, 2));
if (maxFAS(&phi_nStats[0].stdDev, numCells, 2) <= tol_std) {
flag_sim = 1;
}
#else
l2 = l2_norm_cmp(phi_n_avg, anal_soln, numCells, dx);
flag_sim = l2 <= tol_std;
if(!opt.silent && rank == 0){
gettimeofday(&end_time, NULL);
printf("L2: %f, Sofar: %ldu_sec\n", l2, (end_time.tv_sec - start_time.tv_sec)*1000000 + (end_time.tv_usec - start_time.tv_usec));
}
#endif
}
//Solve the lower order system using discretizion methods
lo_solver(data, phi_left_avg, phi_right_avg, J_left_avg, J_right_avg, E_left_avg, E_right_avg, E_n_avg, 0, phi_lo, A_lo,b_lo);
}
#ifdef DO_MPI
if( thread_id == 0 )
MPI_Bcast(&flag_sim,1,MPI_INT,0,MPI_COMM_WORLD);
#endif
#ifdef DO_OMP
#pragma omp barrier
#endif
//end time per iteration
gettimeofday(&endPerIter, NULL);
//printf("ID = %d, thread_id = %d, Time per Iteration: %ldu_sec\n\n",rank, thread_id, (endPerIter.tv_sec - startPerIter.tv_sec)*1000000 + (endPerIter.tv_usec - startPerIter.tv_usec));
}
#ifdef DO_PAPI
PAPI_read(EventSet,stop);
printf("%lld %lld %lld %lld\n",stop[0] - start[0],stop[1] - start[1],stop[2] - start[2],stop[3] - start[3]);
PAPI_cleanup_eventset(EventSet);
PAPI_destroy_eventset(&EventSet);
#endif
free(thread_tallies[thread_id].phi_n);
free(thread_tallies[thread_id].phi_n2);
free(thread_tallies[thread_id].E_n);
#ifdef DO_OMP
}
#endif
/************************************************
* Free memory
*************************************************/
//PAUL END TIMING
gettimeofday(&end_time, NULL);
free(thread_tallies);
free(thread_rand);
if( rank == 0 ){
if( !opt.silent ){
printf("NODEAVG\n");
for (i = 0; i < numCells; i++) {
printf("%d %f %f %f\n", i, phi_n_avg[i], E_n_avg[i], phi_lo_avg[i]);
}
}
printf("Elapsed Time: %ldu_sec\n", (end_time.tv_sec - start_time.tv_sec)*1000000 + (end_time.tv_usec - start_time.tv_usec));
free(A_lo.a);
free(A_lo.b);
free(A_lo.c);
free(b_lo);
free(phi_lo);
#ifdef DO_MPI
free(all_tallies->phi_n);
free(all_tallies->phi_n2);
free(all_tallies->E_n);
#endif
free(Qbar);
free(q0);
free(phi_n_tot);
free(phi_lo_tot);
free(E_n_tot);
free(phi_n_avg);
free(E_n_avg);
free(phi_lo_avg);
#if defined(L2_1) || defined(L2_2) || defined(L2_3)
free(anal_soln);
#else
free(phi_nStats);
#endif
}
#ifdef DO_MPI
free(all_tallies);
#endif
//PAUL OUTPUT SECTION
return (EXIT_SUCCESS);
}
/*********************************************************************************************
* sim_param_print
*
* Function that prints the initialization data and simulation parameters
*
* @param data data structure that holds initialization and simulation parameters
*
* @return void
*
* TODO better naming convention for data and struct data
**********************************************************************************************/
/*
 * sim_param_print
 *
 * Prints the initialization data and simulation parameters to stdout.
 * Read-only: the struct is taken by value and nothing is modified.
 *
 * @param data  structure holding initialization and simulation parameters
 *
 * @return void
 */
void sim_param_print(struct data data) {
    pfloat lx = data.lx;
    pfloat dx = data.dx;
    int nx = data.nx;
    /* NOTE(review): data.NP is read into a long long elsewhere in this file;
     * assumes the particle count fits in an int here -- confirm. */
    int NP = data.NP;
    pfloat sig_t = data.sig_t;
    pfloat sig_s = data.sig_s;

    printf("***********************************************************\n");
    printf("******THE SIMULATION PARAMETERS ARE PRINTED OUT BELOW******\n");
    printf("The system length is, lx = %f\n", lx);
    printf("The cell width is, dx = %f\n", dx);
    printf("The number of cell is, nx = %d\n", nx);
    printf("The reference number of particle is, NP = %d\n", NP);
    printf("The total cross section is, sig_t = %f\n", sig_t);
    printf("The scattering cross section is, sig_s = %f\n", sig_s);
    /* sizeof yields size_t; %zu is the portable specifier (the old %lu is
     * wrong where size_t is not unsigned long, e.g. 64-bit Windows). */
    printf("Floating point data representation is, %zu byte\n", sizeof (pfloat));
    printf("***********************************************************\n");
}
/******************************************************************************************************
* lo_solver
*
* Function call that does the low order solve for a system of equations
*
* @param data data structure that holds initialization and simulation parameters
* @param phi_ho_left
* @param phi_ho_right
* @param J_ho_left
* @param J_ho_right
* @param E_ho_left
* @param E_ho_right
* @param E_ho_n
* @param ic flag for initial condition or not
*
* @return array of floats/doubles
*
* TODO naming conventions should be more abstract since it is used with a variety of input variables
*****************************************************************************************************/
/* Builds the tridiagonal low-order system and solves it for phi_lo.
 * ic == 1: initial-condition path (diffusion-approximation boundary terms).
 * ic == 0: consistency path closed with high-order (Monte Carlo) face
 *          tallies and cell Eddington factors E_ho_n.
 * NOTE: A.a/A.b/A.c and b are scratch -- fully rewritten here and then
 * destroyed by triSolve; only phi_lo is a meaningful output. */
void lo_solver(struct data data, pfloat phi_ho_left, pfloat phi_ho_right, pfloat J_ho_left, pfloat J_ho_right, pfloat E_ho_left, pfloat E_ho_right, pfloat * E_ho_n, int ic, pfloat* phi_lo,triMat A, pfloat* b) {
int i;
//Initialize simulation parameters
int nx = data.nx;
pfloat dx = data.dx;
pfloat sig_t = data.sig_t;
pfloat sig_s = data.sig_s;
pfloat alp = 1.0f / (dx * dx * sig_t); /* 1/(dx^2 sig_t): cell-to-cell coupling coefficient */
pfloat beta = sig_t - sig_s; /* removal (absorption) term on the diagonal */
pfloat gam = sig_s / (dx * sig_t * sig_t); /* boundary coupling coefficient */
pfloat D4 = data.D * 4.0f; /* 4*D, used only on the initial-condition path */
pfloat q0_lo = data.q0_lo;
/* pfloat * b = (pfloat *) malloc(sizeof (pfloat) * nx); */
/* Right-hand side: uniform low-order source in every cell. */
for (i = 0; i < nx; i++) {
b[i] = q0_lo;
}
if (ic == 0) { //not initial condition
//declare the boundary values for the lo system
/* Boundary closure built from high-order face tallies.  Divides by
 * phi_ho_left/right -- assumes non-zero face flux (the caller substitutes
 * 1/3 Eddington defaults when a face tally is zero). */
pfloat phi_l = E_ho_n[0] / (E_ho_left - J_ho_left / (phi_ho_left * 2.0f * gam));
pfloat phi_r = E_ho_n[nx - 1] / (E_ho_right + J_ho_right / (phi_ho_right * 2.0f * gam));
for (i = 0; i < nx; i++) {
if (i == 0) { //left boundary cell
A.b[i] = beta + (gam * E_ho_n[0] * phi_ho_left - phi_l * J_ho_left) / (phi_ho_left * dx);
A.c[i] = -gam * E_ho_n[1] / dx;
} else if (i == nx - 1) { //right boundary cell
A.b[i] = beta + (gam * E_ho_n[nx - 1] * phi_ho_right + phi_r * J_ho_right) / (phi_ho_right * dx);
A.a[i - 1] = -gam * E_ho_n[nx - 2] / dx;
} else { //internal cell
/* Standard three-point stencil weighted by the Eddington factors. */
A.b[i] = 2.0f * alp * E_ho_n[i] + beta;
A.c[i] = -alp * E_ho_n[i + 1];
A.a[i - 1] = -alp * E_ho_n[i - 1];
}
}
} else { //initial condition
//declare the boundary values for the lo system
/* Marshak-style boundary factor from the diffusion coefficient. */
pfloat phi_l = (D4 / (dx + D4));
pfloat phi_r = (D4 / (dx + D4));
for (i = 0; i < nx; i++) {
if (i == 0) { //left boundary cell
A.b[i] = beta + alp * (2.0f * E_ho_left * phi_l - 3.0f * E_ho_n[0]);
A.c[i] = alp * E_ho_n[1];
} else if (i == nx - 1) { //right boundary cell
A.b[i] = beta + alp * (2.0f * E_ho_right * phi_r - 3.0f * E_ho_n[nx - 1]);
A.a[i - 1] = alp * E_ho_n[nx - 2];
} else { //internal cell
A.b[i] = 2.0f * alp * E_ho_n[i] + beta;
A.c[i] = -alp * E_ho_n[i + 1];
A.a[i - 1] = -alp * E_ho_n[i - 1];
}
}
}
/* Thomas-algorithm solve; overwrites A.b and b in place. */
triSolve(nx, A.a, A.b, A.c, b, phi_lo);
}
/******************************************************************************************************
* triSolve
*
* Function the does a tridiagonal matrix solve
* Entirely from http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
* Other than changing to pre-c99 for loops, and switching floats for floats
*
* @param n number of equations
* @param a sub-diagonal (means it is the diagonal below the main diagonal)
* @param b the main diagonal
* @param c sup-diagonal (means it is the diagonal above the main diagonal)
* @param v right part
* @param x the answer
*
* @return void
*****************************************************************************************************/
/*
 * triSolve -- solve a tridiagonal linear system with the Thomas algorithm.
 *
 * @param n  number of equations
 * @param a  sub-diagonal   (n-1 entries, below the main diagonal)
 * @param b  main diagonal  (n entries)
 * @param c  sup-diagonal   (n-1 entries, above the main diagonal)
 * @param v  right-hand side
 * @param x  solution vector (output)
 *
 * NOTE: b and v are used as scratch and are overwritten during the
 * forward-elimination sweep; they do not survive the call.
 *
 * @return void
 */
void triSolve(int n, pfloat *a, pfloat *b, pfloat *c, pfloat *v, pfloat *x) {
    int row;

    /* Forward elimination: fold each sub-diagonal entry into b and v. */
    for (row = 1; row < n; row++) {
        pfloat factor = a[row - 1] / b[row - 1];
        b[row] = b[row] - factor * c[row - 1];
        v[row] = v[row] - factor * v[row - 1];
    }

    /* Back substitution, starting from the last unknown. */
    x[n - 1] = v[n - 1] / b[n - 1];
    for (row = n - 2; row >= 0; row--) {
        x[row] = (v[row] - c[row] * x[row + 1]) / b[row];
    }
}
/**********************************************************************************************************
* collision_and_tally
*
*
*
* @param data
* @param Qbar
*
* @return colData
*
* TODO modularize and more optimized parallel reduction and/or alternate way to get data
*********************************************************************************************************/
/* Zeroes this thread's tallies, computes the half-open cell range
 * [start_val, end_val) that this (rank, thread) pair owns, and delegates the
 * particle streaming/tallying to gen_particles_and_tally.
 *
 * @param data      simulation parameters
 * @param Qbar      per-cell source term (particle weight per cell)
 * @param tallies   this thread's tally block (reset here before use)
 * @param thread_id / nthreads  OpenMP decomposition inputs
 * @param dsfmt     this thread's RNG state
 * @param rank / nprocs         MPI decomposition inputs
 */
void collision_and_tally(struct data data, pfloat * Qbar, colData * tallies, int thread_id, int nthreads, dsfmt_t * dsfmt, int rank, int nprocs) {
//initialize simulation parameters
int nx = data.nx;
int NP = data.NP;
//stream the particles, collide and calculate moment all simultaneously
//calculate the final location of the particle
//initialize tallies and corresponding variables
memset(tallies->phi_n,0,sizeof(afloat)*nx);
memset(tallies->phi_n2,0,sizeof(afloat)*nx);
memset(tallies->E_n,0,sizeof(afloat)*nx);
tallies->E_left = 0.0;
tallies->J_left = 0.0;
tallies->phi_left = 0.0;
tallies->E_right = 0.0;
tallies->J_right = 0.0;
tallies->phi_right = 0.0;
struct timeval startBoth, endBoth; /* only referenced by the commented-out timing code below */
int localnx,localstart;
//Each process spawns particles in one cell of the domain and streams it. Divide up the cells among all threads on all the MPI processes more or less evenly
#ifdef DO_MPI
/* Even MPI split: the first switch_proc ranks get floor(nx/nprocs) cells,
 * the remaining ranks get one extra cell each, so all nx cells are covered. */
int lower_proc = nx/nprocs;
int higher_proc = lower_proc + 1;
int switch_proc = higher_proc * nprocs - nx;
if( rank < switch_proc )
{
localstart = lower_proc * rank;
localnx = lower_proc;
}
else
{
localstart = lower_proc * switch_proc + ( rank - switch_proc ) * higher_proc;
localnx = higher_proc;
}
#else
localnx = nx;
localstart = 0;
#endif
int start_val,end_val;
#ifdef DO_OMP
/* Same even split again, this time dividing this rank's localnx cells
 * among the OpenMP threads. */
int local_lower = localnx / nthreads;
int local_upper = local_lower + 1;
int switch_thread = local_upper * nthreads - localnx;
if( thread_id < switch_thread )
{
start_val = localstart + thread_id * local_lower;
end_val = start_val + local_lower;
}
else
{
start_val = localstart + switch_thread * local_lower + ( thread_id - switch_thread ) * local_upper ;
end_val = start_val + local_upper;
}
#else
start_val = localstart;
end_val = localstart + localnx;
#endif
if (NP != 0) {
//start time for montecarlo
//gettimeofday(&startBoth, NULL);
gen_particles_and_tally(data, Qbar, tallies, start_val, end_val, dsfmt);
//end time for montecarlo
//gettimeofday(&endBoth, NULL);
//printf("Time for Monte Carlo per Iteration: %ld u_sec\n", (endBoth.tv_sec - startBoth.tv_sec)*1000000 + (endBoth.tv_usec - startBoth.tv_usec));
}
}
//Function to spawn particles and stream them
//INPUT: 1) data : the physical parameters of the system
// 2) The source term per cell
// 3) Pointer to structure that holds the tallies
// 4) The number of the cell the thread starts spawning particles in
// 5) The number of the cell the threads stops spawning particles in
/* Spawns NPc particles in each cell of [start_iter_cell, end_iter_cell) and
 * streams them analytically: half move right, half move left.  For every
 * particle it samples a direction cosine mu ~ U(0,1), an exponential free
 * path fx = -ln(U)/sig_t, and accumulates track-length contributions to the
 * per-cell phi, phi^2 (for the std-dev estimate) and E tallies, plus the
 * face tallies (phi/J/E left and right) when a particle leaks out of the
 * system.  All tallies are accumulated into *tallies (this thread's block);
 * no shared state is touched. */
void gen_particles_and_tally(struct data data, pfloat *Qbar, colData *tallies, int start_iter_cell, int end_iter_cell, dsfmt_t * dsfmt){
const pfloat lx = data.lx, eps = data.eps; //System length and cut off value for mu
const int numCells = data.nx;
const pfloat sig_t_recip = data.sig_t_recip;
const pfloat cellsize = data.dx, cellsize_recip = data.dx_recip, cellsizeS2 = data.dx2; //size of cell, its reciprocal and its square
const long long NPc = data.NPc; //Number of particles per cell
//Temporaries
pfloat fx, mu, efmu, efmu_recip, start, end;
pfloat weight;
unsigned int num_right;
int i, j, k;
pfloat efmu_recipS2;
pfloat weightS2;
pfloat absdist;
int startcell, endcell;
pfloat begin_rightface, end_leftface;
pfloat weight_efmu, weight_efmu_recip, weightS2_efmu_recipS2, weight_cellsize;
pfloat weight_cellsize_efmu_recip, weightS2_cellsizeS2_efmu_recipS2, weight_cellsize_efmu;
#ifndef NDEBUG
pfloat fx_avg = 0.0;
long long NP = data.NP;
#endif
/* create and tally all particles */
for (i = start_iter_cell; i < end_iter_cell; i++) {
weight = Qbar[i]; // weight of each particle
weightS2 = weight*weight; // for STDEV
// Half the particles are going to the right
num_right = NPc>>1;
// Stream right-moving particles
for (j = 0; j < num_right; j++) {
// find angle, distance travelled, and absolute start and end positions
mu = unirandMT_0_1(dsfmt);
fx = (-log(unirandMT_0_1(dsfmt)) * sig_t_recip);
start = (i + unirandMT_0_1(dsfmt)) * cellsize; /* uniform birth position inside cell i */
end = start + mu * fx;
#ifndef NDEBUG
fx_avg += fx;
#endif
efmu = (mu > eps) ? mu : eps/2; /* if mu is too small, replace it with eps/2 */
efmu_recip = 1.0 / efmu; /* reciprocal */
efmu_recipS2 = efmu_recip*efmu_recip; /* for STDEV */
/* pre-compute repeatedly used measurements*/
weight_efmu_recip = weight*efmu_recip;
weight_efmu = weight*efmu;
weight_cellsize = weight*cellsize;
weightS2_efmu_recipS2 = weightS2*efmu_recipS2;
/* corner-case analysis, right-moving particles */
// left-most cell is the starting cell
startcell = i;
// right-most cell is the ending cell.
//If particles streams out
if (end >= lx) {
/* Particle leaks through the right face: clamp the track to the
 * system boundary and score the right-face leakage tallies. */
end = lx;
endcell = numCells-1;
// for tallying, record this corner case
tallies->phi_right += weight_efmu_recip; //weight * efmu_recip;//
tallies->E_right += weight_efmu; //weight * efmu;//
tallies->J_right += weight;
}
else {
endcell = (int)(end * cellsize_recip);
}
// tally up the information from the corner cells
if (startcell == endcell){
/* tally once, with difference of particles */
absdist = (end-start);
tallies->phi_n[startcell] += absdist * weight_efmu_recip;
tallies->phi_n2[startcell] += absdist * absdist * weightS2_efmu_recipS2;
tallies->E_n[startcell] += absdist * weight_efmu;
}
else{
/* starting cell */
begin_rightface = (startcell + 1) * cellsize;
absdist = (begin_rightface - start); /* otherwise -0.0f can mess things up */
assert(absdist >= -0.0f);
tallies->phi_n[startcell] += absdist * weight_efmu_recip;
tallies->phi_n2[startcell] += absdist * absdist * weightS2_efmu_recipS2;
tallies->E_n[startcell] += absdist * weight_efmu;
/* ending cell */
end_leftface = endcell * cellsize;
absdist = (end - end_leftface); /* otherwise -0.0f can mess things up */
assert(absdist >= -0.0f);
tallies->phi_n[endcell] += absdist * weight_efmu_recip;
tallies->phi_n2[endcell] += absdist * absdist * weightS2_efmu_recipS2;
tallies->E_n[endcell] += absdist * weight_efmu;
}
// precompute values for inner loop
/* Interior cells are fully traversed, so each gets a constant
 * (cell-width-scaled) contribution. */
weight_cellsize_efmu_recip = (afloat)weight_efmu_recip*cellsize;
weightS2_cellsizeS2_efmu_recipS2 = (afloat)weightS2_efmu_recipS2*cellsizeS2;
weight_cellsize_efmu = (afloat)weight_efmu*cellsize;
for (k = startcell+1; k <= endcell-1; k++) {
tallies->phi_n[k] += weight_cellsize_efmu_recip;
tallies->phi_n2[k] += weightS2_cellsizeS2_efmu_recipS2;
tallies->E_n[k] += weight_cellsize_efmu;
}
// sanity checks
assert(startcell <= endcell);
assert(startcell >= 0 && endcell <= numCells - 1);
assert(start <= end);
assert(start >= 0.0);
assert(end <= lx);
}
// left-moving particles
/* Mirror of the loop above: the particle is born at `end` inside cell i
 * and streams leftwards to `start`; remaining NPc - num_right particles. */
for (j = 0; j < NPc-num_right; j++) {
// find angle, distance travelled, and absolute start and end positions
mu = unirandMT_0_1(dsfmt);
fx = (-log(unirandMT_0_1(dsfmt)) * sig_t_recip);
end = (i + unirandMT_0_1(dsfmt)) * cellsize;
start = end - mu * fx;
#ifndef NDEBUG
fx_avg += fx;
#endif
// precompute per-loop constants, to be used below
efmu = (mu > eps) ? mu : eps/2; /* with epsilon boundaries */
efmu_recip = 1.0 / efmu; /* reciprocal */
efmu_recipS2 = efmu_recip*efmu_recip; /* for STDEV */
/* pre-compute repeatedly used measurements*/
weight_efmu_recip = weight*efmu_recip;
weight_efmu = weight*efmu;
weight_cellsize = weight*cellsize;
weightS2_efmu_recipS2 = weightS2*efmu_recipS2;
/* corner-case analysis, left-moving particles */
// right-most cell is the starting cell
endcell = i;
// left-most cell is the starting cell
//If the particle streams out
if (start <= 0.0) { // corner case
/* Particle leaks through the left face: clamp and score the
 * left-face leakage tallies (current J is negative leftwards). */
start = 0.0;
startcell = 0;
// for tallying, record this corner case
tallies->phi_left += weight * efmu_recip;//weight_efmu_recip;
tallies->E_left += weight * efmu; //weight_efmu;
tallies->J_left += -weight; // note: sign is negative!
}
else {
startcell = (int)(start * cellsize_recip);
}
// tally up the information from the corner cells
/* tally corner cells */
if (startcell == endcell){
/* tally once, with difference of particles */
absdist = (end-start);
tallies->phi_n[startcell] += absdist * weight_efmu_recip;
tallies->phi_n2[startcell] += absdist * absdist * weightS2_efmu_recipS2;
tallies->E_n[startcell] += absdist * weight_efmu;
}
else{
/* starting cell */
begin_rightface = (startcell + 1) * cellsize;
absdist = (begin_rightface - start); /* otherwise -0.0f can mess things up */
assert(absdist >= -0.0f);
tallies->phi_n[startcell] += absdist * weight_efmu_recip;
tallies->phi_n2[startcell] += absdist * absdist * weightS2_efmu_recipS2;
tallies->E_n[startcell] += absdist * weight_efmu;
/* ending cell */
end_leftface = endcell * cellsize;
absdist = (end - end_leftface); /* otherwise -0.0f can mess things up */
assert(absdist >= -0.0f);
tallies->phi_n[endcell] += absdist * weight_efmu_recip;
tallies->phi_n2[endcell] += absdist * absdist * weightS2_efmu_recipS2;
tallies->E_n[endcell] += absdist * weight_efmu;
}
// precompute values for inner loop
weight_cellsize_efmu_recip = (afloat)weight_efmu_recip*cellsize;
weightS2_cellsizeS2_efmu_recipS2 = (afloat)weightS2_efmu_recipS2*cellsizeS2;
weight_cellsize_efmu = (afloat)weight_efmu*cellsize;
for (k = startcell+1; k <= endcell-1; k++) {
tallies->phi_n[k] += weight_cellsize_efmu_recip;
tallies->phi_n2[k] += weightS2_cellsizeS2_efmu_recipS2;
tallies->E_n[k] += weight_cellsize_efmu;
}
//Sanity checks
assert(startcell <= endcell);
assert(startcell >= 0 && endcell <= numCells - 1);
assert(start <= end);
assert(start >= 0.0);
assert(end <= lx);
}
}
#ifndef NDEBUG
printf("Avg fx = %lf\n",fx_avg / ( numCells * NP ));
#endif
}
// calculate the mean and stdev of phi
// assumes that phi^2 was pre-calculated
// value is phi, value2 is phi^2
// NP is used for scaling, along with scale
// mean_std_calc -- per-cell mean and standard deviation of the flux tallies.
// `value` holds the accumulated sum of phi and `value2` the accumulated sum
// of phi^2 (pre-computed by the caller); samp_cnt is the total history
// count, while NP and scale rescale the mean back to physical units.
// Results are written into retvals[0..arrLen-1].
void mean_std_calc(afloat * value, afloat * value2, unsigned long int samp_cnt, int NP, int arrLen, pfloat scale, meanDev* retvals){
    int cell = 0;
    while (cell < arrLen) {
        retvals[cell].mean = (value[cell] * NP * scale) / samp_cnt;
        // sample std-dev from the sum-of-squares identity: Var = E[x^2] - E[x]^2
        retvals[cell].stdDev = sqrt(fabs((value2[cell] / samp_cnt) - pow(retvals[cell].mean, 2.0f)) / (samp_cnt - 1));
        cell++;
    }
}
|
atomic-13.c | /* { dg-do run } */
extern void abort (void);
long long l, m;
int i, j;
/* Exercises every form of '#pragma omp atomic' (read, write, capture as a
 * single statement, and capture as a structured block in both
 * read-then-update and update-then-read order) on mixed int / long long
 * operands.  Assumes l == 77 and j == 88 on entry (set by main); calls
 * abort() if any observed value deviates from the expected sequence. */
void
foo (void)
{
/* atomic reads: l -> i and j -> m (with int <-> long long conversion) */
#pragma omp atomic read
i = l;
#pragma omp atomic read
m = j;
if (i != 77 || m != 88)
abort ();
/* atomic writes: l becomes 84, j becomes 95 */
#pragma omp atomic write
l = 1 + i + 6 * 1;
#pragma omp atomic write
j = 170 - 170 + m + 1 * 7;
/* statement-form capture: update then return the new value */
#pragma omp atomic capture
i = l += 4;
#pragma omp atomic capture
m = j += 4;
if (i != 88 || m != 99)
abort ();
/* block-form capture, read-then-update: i/m observe the pre-update value */
#pragma omp atomic capture
{
i = l;
l += 4;
}
#pragma omp atomic capture
{
m = j;
j += 4;
}
if (i != 88 || m != 99)
abort ();
/* block-form capture, update-then-read: i/m observe the post-update value */
#pragma omp atomic capture
{
l += 4;
i = l;
}
#pragma omp atomic capture
{
j += 4;
m = j;
}
if (i != 96 || m != 107)
abort ();
}
/* Test driver: seed the globals foo() expects (l = 77, j = 88) and run the
 * atomic checks; foo aborts on failure, so reaching return 0 means pass. */
int
main ()
{
l = 77;
j = 88;
foo ();
return 0;
}
|
GB_binop__second_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__second_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_fp64)
// A*D function (colscale): GB (_AxD__second_fp64)
// D*A function (rowscale): GB (_DxB__second_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB (_bind2nd__second_fp64)
// C=A'+scalar GB (_bind2nd_tran__second_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = bij
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_FP64 || GxB_NO_SECOND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Disabled (#if 0) for this
// operator: SECOND is not in the op list above, so the code generator
// emitted no kernel name here (GB ((none))).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.  The numeric
// loop lives in the included template; this wrapper only binds the fp64
// type and SECOND-operator macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// SECOND op (cij = bij), sliced by the B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the SECOND op.
GrB_Info GB (_Cdense_accumb__second_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the return above always fires) — an artifact
// of the code generator; kept to match the generated layout.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with the
// SECOND op (each cij takes the diagonal entry of D for its column).
GrB_Info GB (_AxD__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with the
// SECOND op (cij = bij, so D only contributes its pattern).
GrB_Info GB (_DxB__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the SECOND_FP64 op.  The workspaces
// declared below are used by the template and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case) with the SECOND op.
GrB_Info GB (_AemultB_01__second_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for SECOND (handled by swapping to FIRST elsewhere),
// so only the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B when M is sparse/hyper and A and B are bitmap/full.
GrB_Info GB (_AemultB_03__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__second_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Cx = second(x, Bx) = Bx: bind1st with SECOND ignores the bound scalar,
// so the generator disabled this kernel (presumably handled by the
// generic identity-apply path instead — NOTE(review): inferred, confirm
// against the generator).
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = second(Ax, y) = y: every entry present in the bitmap Ab is set to
// the bound scalar y.  Ax itself is never read (SECOND ignores its first
// argument); the stray "; ;" below is the generator's empty GB_GETA slot.
GrB_Info GB (_bind2nd__second_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose the pattern of A and store second(aij, y) = y
// at each transposed entry.  The loop comes from GB_unop_transpose.c,
// which expands the GB_CAST_OP macro defined immediately above.
GrB_Info GB (_bind2nd_tran__second_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_uint32
// op(A') function: GB_tran__minv_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = minv (cast (Ax)): cast each uint32_t entry to int64_t and apply the
// integer multiplicative-inverse op, elementwise over anz entries.
// GB_CAST_OP expands the GETA/CASTING/OP macros defined above.
GrB_Info GB_unop__minv_int64_uint32
(
int64_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose A, typecast uint32_t -> int64_t, and
// apply the op.  Only phase 2 (numeric) of the transpose template is run.
GrB_Info GB_tran__minv_int64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
interp_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "sys_port.h"
#include <arm_neon.h>
#include "interp_kernel_arm.h"
#define MIN(a, b) ((a) < (b) ? (a) : (b))
static void linear_coeffs(int w, int outw, int* xofs, float* alpha)
{
double scale = ( double )w / outw;
for (int dx = 0; dx < outw; dx++)
{
float fx = ( float )((dx + 0.5) * scale - 0.5);
int sx = floor(fx);
fx -= sx;
if (sx < 0)
{
sx = 0;
fx = 0.f;
}
if (sx >= w - 1)
{
sx = w - 2;
fx = 1.f;
}
xofs[dx] = sx;
alpha[dx * 2] = 1.f - fx;
alpha[dx * 2 + 1] = fx;
}
}
/* Bilinear resize of a single (in_h x in_w) float plane into (out_h x out_w).
 * alpha/xofs hold the horizontal weights/offsets (2 per column) and
 * beta/yofs the vertical ones (2 per row), as produced by linear_coeffs.
 * Two row buffers are rotated between iterations so that a source row that
 * was already horizontally resized for the previous output row is reused.
 *
 * BUG FIX: the NEON horizontal loops process two columns per iteration and
 * previously left the last column uninitialized when out_w is odd; scalar
 * tail loops now finish the remainder. */
static void resize_bilinear_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h,
                                  int out_w, int in_h, int in_w)
{
    int w = out_w;    // dst.w;
    int h = out_h;    // dst.h;

    // loop body
    float* rowsbuf0 = (float*)sys_malloc(w * sizeof(float));
    float* rowsbuf1 = (float*)sys_malloc(w * sizeof(float));
    float* rows0 = rowsbuf0;
    float* rows1 = rowsbuf1;

    int prev_sy1 = -2;

    for (int dy = 0; dy < h; dy++)
    {
        int sy = yofs[dy];

        if (sy == prev_sy1)
        {
            // reuse all rows
        }
        else if (sy == prev_sy1 + 1)
        {
            // hresize one row: the previous rows1 becomes rows0; only
            // source row sy+1 must be resized.
            float* rows0_old = rows0;
            rows0 = rows1;
            rows1 = rows0_old;
            const float* S1 = src + (sy + 1) * in_w;    // src.row(sy+1);

            const float* alphap = alpha;
            float* rows1p = rows1;
            int dx = 0;
            // neon: two output columns per iteration
            for (; dx + 1 < w; dx += 2)
            {
                int sx = xofs[dx];
                int sxn = xofs[dx + 1];
                const float* S1p = S1 + sx;
                const float* S1np = S1 + sxn;

                float32x4_t _a = vld1q_f32(alphap);
                float32x2_t _S1 = vld1_f32(S1p);
                float32x2_t _S1n = vld1_f32(S1np);

                float32x4_t _S1S1n = vcombine_f32(_S1, _S1n);
                float32x4_t _ms1 = vmulq_f32(_S1S1n, _a);
                float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1));

                vst1_f32(rows1p + dx, _rows1);

                alphap += 4;
            }
            // scalar tail for odd w (the NEON loop handles pairs only)
            for (; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S1p = S1 + sx;
                rows1p[dx] = S1p[0] * alphap[0] + S1p[1] * alphap[1];
                alphap += 2;
            }
        }
        else
        {
            // hresize two rows: both bracketing source rows are resized.
            const float* S0 = src + sy * in_w;          // src.row(sy);
            const float* S1 = src + (sy + 1) * in_w;    // src.row(sy+1);

            const float* alphap = alpha;
            float* rows0p = rows0;
            float* rows1p = rows1;
            int dx = 0;
            for (; dx + 1 < w; dx += 2)
            {
                int sx = xofs[dx];
                int sxn = xofs[dx + 1];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                const float* S0np = S0 + sxn;
                const float* S1np = S1 + sxn;

                float32x4_t _a = vld1q_f32(alphap);
                float32x2_t _S0 = vld1_f32(S0p);
                float32x2_t _S1 = vld1_f32(S1p);
                float32x2_t _S0n = vld1_f32(S0np);
                float32x2_t _S1n = vld1_f32(S1np);

                float32x4_t _S0S0n = vcombine_f32(_S0, _S0n);
                float32x4_t _S1S1n = vcombine_f32(_S1, _S1n);
                float32x4_t _ms0 = vmulq_f32(_S0S0n, _a);
                float32x4_t _ms1 = vmulq_f32(_S1S1n, _a);
                float32x2_t _rows0 = vpadd_f32(vget_low_f32(_ms0), vget_high_f32(_ms0));
                float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1));

                vst1_f32(rows0p + dx, _rows0);
                vst1_f32(rows1p + dx, _rows1);

                alphap += 4;
            }
            // scalar tail for odd w (the NEON loop handles pairs only)
            for (; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                rows0p[dx] = S0p[0] * alphap[0] + S0p[1] * alphap[1];
                rows1p[dx] = S1p[0] * alphap[0] + S1p[1] * alphap[1];
                alphap += 2;
            }
        }

        prev_sy1 = sy;

        // vresize: D = rows0 * b0 + rows1 * b1, eight columns per iteration.
        float b0 = beta[0];
        float b1 = beta[1];

        float* rows0p = rows0;
        float* rows1p = rows1;
        float* Dp = dst + dy * out_w;    // dst.row(dy);

        int nn = w >> 3;
        int remain = w - (nn << 3);
        float32x4_t _b0 = vdupq_n_f32(b0);
        float32x4_t _b1 = vdupq_n_f32(b1);
        for (; nn > 0; nn--)
        {
            float32x4_t _rows0 = vld1q_f32(rows0p);
            float32x4_t _rows1 = vld1q_f32(rows1p);

            float32x4_t _D = vmulq_f32(_rows0, _b0);
            _D = vmlaq_f32(_D, _rows1, _b1);

            vst1q_f32(Dp, _D);

            float32x4_t _rows0n = vld1q_f32(rows0p + 4);
            float32x4_t _rows1n = vld1q_f32(rows1p + 4);

            float32x4_t _Dn = vmulq_f32(_rows0n, _b0);
            _Dn = vmlaq_f32(_Dn, _rows1n, _b1);

            vst1q_f32(Dp + 4, _Dn);

            Dp += 8;
            rows0p += 8;
            rows1p += 8;
        }
        for (; remain; --remain)
        {
            *Dp++ = *rows0p++ * b0 + *rows1p++ * b1;
        }

        beta += 2;
    }

    sys_free(rowsbuf0);
    sys_free(rowsbuf1);
}
/* Cubic-convolution kernel weights with A = -0.75 (the OpenCV/ncnn
 * convention).  Given the fractional offset fx in [0,1), fills coeffs[0..3]
 * with the weights for the four taps at offsets -1, 0, +1, +2.  The last
 * weight is derived so the four always sum to exactly 1. */
static inline void interpolate_cubic(float fx, float* coeffs)
{
    const float A = -0.75f;

    float d0 = fx + 1;    // distance to the tap at -1
    float d1 = fx;        // distance to the tap at 0
    float d2 = 1 - fx;    // distance to the tap at +1

    coeffs[0] = A * d0 * d0 * d0 - 5 * A * d0 * d0 + 8 * A * d0 - 4 * A;
    coeffs[1] = (A + 2) * d1 * d1 * d1 - (A + 3) * d1 * d1 + 1;
    coeffs[2] = (A + 2) * d2 * d2 * d2 - (A + 3) * d2 * d2 + 1;
    coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
}
// Compute bicubic source offsets (xofs) and 4-tap weights (alpha, 4 per
// output column) with half-pixel centre alignment.  Out-of-range taps at
// the image borders are folded into the in-range ones so that callers may
// safely read S[sx-1 .. sx+2].
// NOTE(review): the clamps below presuppose w >= 4 (sx is forced into
// [1, w-3]) — not checked here; confirm callers guarantee it.
static void cubic_coeffs(int w, int outw, int* xofs, float* alpha)
{
double scale = ( double )w / outw;
for (int dx = 0; dx < outw; dx++)
{
float fx = ( float )((dx + 0.5) * scale - 0.5);
int sx = floor(fx);
fx -= sx;
interpolate_cubic(fx, alpha + dx * 4);
// The border fixups below are deliberately sequential ifs (not else-if):
// for very small w an earlier clamp can feed the next condition.
if (sx <= -1)
{
sx = 1;
alpha[dx * 4 + 0] = 1.f - alpha[dx * 4 + 3];
alpha[dx * 4 + 1] = alpha[dx * 4 + 3];
alpha[dx * 4 + 2] = 0.f;
alpha[dx * 4 + 3] = 0.f;
}
if (sx == 0)
{
sx = 1;
// shift the taps right by one, merging the two leftmost weights
alpha[dx * 4 + 0] = alpha[dx * 4 + 0] + alpha[dx * 4 + 1];
alpha[dx * 4 + 1] = alpha[dx * 4 + 2];
alpha[dx * 4 + 2] = alpha[dx * 4 + 3];
alpha[dx * 4 + 3] = 0.f;
}
if (sx == w - 2)
{
sx = w - 3;
// shift the taps left by one, merging the two rightmost weights
alpha[dx * 4 + 3] = alpha[dx * 4 + 2] + alpha[dx * 4 + 3];
alpha[dx * 4 + 2] = alpha[dx * 4 + 1];
alpha[dx * 4 + 1] = alpha[dx * 4 + 0];
alpha[dx * 4 + 0] = 0.f;
}
if (sx >= w - 1)
{
sx = w - 3;
alpha[dx * 4 + 3] = 1.f - alpha[dx * 4 + 0];
alpha[dx * 4 + 2] = alpha[dx * 4 + 0];
alpha[dx * 4 + 1] = 0.f;
alpha[dx * 4 + 0] = 0.f;
}
xofs[dx] = sx;
}
}
// Bicubic resize of a single (in_h x in_w) float plane into (out_h x out_w).
// alpha/xofs give the 4 horizontal weights/offset per output column and
// beta/yofs the 4 vertical weights/offset per output row (from cubic_coeffs).
// Four row buffers hold the horizontally-resized source rows bracketing the
// current output row; they are rotated so that rows already computed for the
// previous output row are reused.  The rotation order below is critical.
// NOTE(review): reads S?p[-1]..S?p[2]; relies on cubic_coeffs clamping
// offsets into [1, w-3] — confirm inputs always come from cubic_coeffs.
static void resize_bicubic_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h,
int out_w, int in_h, int in_w)
{
int w = out_w; // dst.w;
int h = out_h; // dst.h;
// loop body
float* rowsbuf0 = ( float* )sys_malloc(w * sizeof(float));
float* rowsbuf1 = ( float* )sys_malloc(w * sizeof(float));
float* rowsbuf2 = ( float* )sys_malloc(w * sizeof(float));
float* rowsbuf3 = ( float* )sys_malloc(w * sizeof(float));
float* rows0 = rowsbuf0;
float* rows1 = rowsbuf1;
float* rows2 = rowsbuf2;
float* rows3 = rowsbuf3;
int prev_sy1 = -3;
for (int dy = 0; dy < h; dy++)
{
int sy = yofs[dy];
if (sy == prev_sy1)
{
// reuse all rows
}
else if (sy == prev_sy1 + 1)
{
// hresize one row: rotate the buffers up by one, recompute only row sy+2
float* rows0_old = rows0;
rows0 = rows1;
rows1 = rows2;
rows2 = rows3;
rows3 = rows0_old;
const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);
const float* alphap = alpha;
float* rows3p = rows3;
for (int dx = 0; dx < w; dx++)
{
int sx = xofs[dx];
const float* S3p = S3 + sx;
float a0 = alphap[0];
float a1 = alphap[1];
float a2 = alphap[2];
float a3 = alphap[3];
rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;
alphap += 4;
}
}
else if (sy == prev_sy1 + 2)
{
// hresize two rows: rotate by two, recompute rows sy+1 and sy+2
float* rows0_old = rows0;
float* rows1_old = rows1;
rows0 = rows2;
rows1 = rows3;
rows2 = rows0_old;
rows3 = rows1_old;
const float* S2 = src + (sy + 1) * in_w; // src.row(sy+1);
const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);
const float* alphap = alpha;
float* rows2p = rows2;
float* rows3p = rows3;
for (int dx = 0; dx < w; dx++)
{
int sx = xofs[dx];
const float* S2p = S2 + sx;
const float* S3p = S3 + sx;
float a0 = alphap[0];
float a1 = alphap[1];
float a2 = alphap[2];
float a3 = alphap[3];
rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;
alphap += 4;
}
}
else if (sy == prev_sy1 + 3)
{
// hresize three rows: rotate by three, recompute rows sy .. sy+2
float* rows0_old = rows0;
float* rows1_old = rows1;
float* rows2_old = rows2;
rows0 = rows3;
rows1 = rows0_old;
rows2 = rows1_old;
rows3 = rows2_old;
const float* S1 = src + sy * in_w; // src.row(sy);
const float* S2 = src + (sy + 1) * in_w; // src.row(sy+1);
const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);
const float* alphap = alpha;
float* rows1p = rows1;
float* rows2p = rows2;
float* rows3p = rows3;
for (int dx = 0; dx < w; dx++)
{
int sx = xofs[dx];
const float* S1p = S1 + sx;
const float* S2p = S2 + sx;
const float* S3p = S3 + sx;
float a0 = alphap[0];
float a1 = alphap[1];
float a2 = alphap[2];
float a3 = alphap[3];
rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;
alphap += 4;
}
}
else
{
// hresize four rows: nothing reusable, recompute rows sy-1 .. sy+2
const float* S0 = src + (sy - 1) * in_w; // src.row(sy-1);
const float* S1 = src + sy * in_w; // src.row(sy);
const float* S2 = src + (sy + 1) * in_w; // src.row(sy+1);
const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);
const float* alphap = alpha;
float* rows0p = rows0;
float* rows1p = rows1;
float* rows2p = rows2;
float* rows3p = rows3;
for (int dx = 0; dx < w; dx++)
{
int sx = xofs[dx];
const float* S0p = S0 + sx;
const float* S1p = S1 + sx;
const float* S2p = S2 + sx;
const float* S3p = S3 + sx;
float a0 = alphap[0];
float a1 = alphap[1];
float a2 = alphap[2];
float a3 = alphap[3];
rows0p[dx] = S0p[-1] * a0 + S0p[0] * a1 + S0p[1] * a2 + S0p[2] * a3;
rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;
alphap += 4;
}
}
prev_sy1 = sy;
// vresize: blend the four resized rows with the vertical weights
float b0 = beta[0];
float b1 = beta[1];
float b2 = beta[2];
float b3 = beta[3];
float* rows0p = rows0;
float* rows1p = rows1;
float* rows2p = rows2;
float* rows3p = rows3;
float* Dp = dst + dy * out_w; // dst.row(dy);
for (int dx = 0; dx < w; dx++)
{
*Dp++ = *rows0p++ * b0 + *rows1p++ * b1 + *rows2p++ * b2 + *rows3p++ * b3;
}
beta += 4;
}
sys_free(rowsbuf0);
sys_free(rowsbuf1);
sys_free(rowsbuf2);
sys_free(rowsbuf3);
}
/*
 * interp_run: resize the (C,H,W) planes of input_tensor into output_tensor.
 *
 * resize_type: 1 = nearest neighbour, 2 = bilinear, 3 = bicubic.
 * Returns 0 on success, -1 on allocation failure.
 *
 * NOTE(review): dims[0] (presumably batch) is never iterated over — this
 * assumes batch == 1 for 4-D inputs; confirm against callers.
 */
int interp_run(struct ir_tensor* output_tensor, struct ir_tensor* input_tensor, struct interp_param* interp_param,
               int num_thread)
{
    int resize_type = interp_param->resize_type;
    int out_w = interp_param->output_width;
    int out_h = interp_param->output_height;
    float width_scale = interp_param->width_scale;
    float height_scale = interp_param->height_scale;

    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    float* data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;

    // Derive the output size from the scales when not given explicitly.
    if (out_h == 0 || out_w == 0)
    {
        out_h = in_h * height_scale;
        out_w = in_w * width_scale;
    }

    if (out_h == in_h && out_w == in_w)
    {
        // BUG FIX: the original did `out_data = data;`, which only changed a
        // local pointer and left the output tensor's buffer untouched.
        // Copy the input through instead (unless the buffers already alias).
        if (out_data != data)
            memcpy(out_data, data, (size_t)in_c * in_h * in_w * sizeof(float));
        return 0;
    }

    int out_channel_size = out_h * out_w;
    int in_channel_size = in_h * in_w;

    // 1-D input: broadcast each scalar across a full output plane.
    if (input_tensor->dim_num == 1)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < input_tensor->dims[0]; ++q)
        {
            for (int i = 0; i < out_h * out_w; i++)
            {
                out_data[q * out_h * out_w + i] = data[q];
            }
        }
        return 0;
    }

    if (resize_type == 1)    // nearest
    {
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; q++)
        {
            for (int y = 0; y < out_h; ++y)
            {
                const int in_y = MIN(( int )(y / height_scale), (in_h - 1));
                for (int x = 0; x < out_w; ++x)
                {
                    const int in_x = MIN(( int )(x / width_scale), (in_w - 1));
                    out_data[out_w * y + x + out_w * out_h * q] = data[in_y * in_w + in_x + q * in_w * in_h];
                }
            }
        }
    }
    else if (resize_type == 2)    // bilinear
    {
        // Layout: xofs[out_w] | yofs[out_h] | alpha[out_w*2] | beta[out_h*2]
        int* buf = ( int* )sys_malloc((out_w + out_h + out_w * 2 + out_h * 2) * sizeof(int));
        if (buf == NULL)
            return -1;    // allocation failed
        int* xofs = buf;                                               // new int[ow];
        int* yofs = buf + out_w;                                       // new int[oh];
        float* alpha = ( float* )(buf + out_w + out_h);                // new float[ow * 2];
        float* beta = ( float* )(buf + out_w + out_h + out_w * 2);     // new float[oh * 2];

        linear_coeffs(in_w, out_w, xofs, alpha);
        linear_coeffs(in_h, out_h, yofs, beta);

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; ++q)
        {
            resize_bilinear_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs,
                                  out_h, out_w, in_h, in_w);
        }

        sys_free(buf);
    }
    else if (resize_type == 3)    // bicubic
    {
        // Layout: xofs[out_w] | yofs[out_h] | alpha[out_w*4] | beta[out_h*4]
        int* buf = ( int* )sys_malloc((out_w + out_h + out_w * 4 + out_h * 4) * sizeof(int));
        if (buf == NULL)
            return -1;    // allocation failed
        int* xofs = buf;                                               // new int[ow];
        int* yofs = buf + out_w;                                       // new int[oh];
        float* alpha = ( float* )(buf + out_w + out_h);                // new float[ow * 4];
        float* beta = ( float* )(buf + out_w + out_h + out_w * 4);     // new float[oh * 4];

        cubic_coeffs(in_w, out_w, xofs, alpha);
        cubic_coeffs(in_h, out_h, yofs, beta);

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; q++)
        {
            resize_bicubic_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs,
                                 out_h, out_w, in_h, in_w);
        }

        sys_free(buf);
        return 0;
    }

    return 0;
}
|
GB_binop__lor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint32)
// A*D function (colscale): GB (_AxD__lor_uint32)
// D*A function (rowscale): GB (_DxB__lor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint32)
// C=scalar+B GB (_bind1st__lor_uint32)
// C=scalar+B' GB (_bind1st_tran__lor_uint32)
// C=A+scalar GB (_bind2nd__lor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_UINT32 || GxB_NO_LOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Disabled (#if 0): LOR is
// not in the op list above, so the generator emitted no kernel name here.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator, using the
// LOR_UINT32 op (cij = (aij != 0) || (bij != 0)).
GrB_Info GB (_Cdense_ewise3_noaccum__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with LOR,
// sliced by the B_ek_slicing task decomposition.
GrB_Info GB (_Cdense_accumB__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with LOR.
GrB_Info GB (_Cdense_accumb__lor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable (the return above always fires) — an artifact
// of the code generator; kept to match the generated layout.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with LOR.
GrB_Info GB (_AxD__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with LOR.
GrB_Info GB (_DxB__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Element-wise "add" (set union of patterns) with the LOR uint32_t operator,
// optionally under a mask M.  All the real work is in GB_add_template.c.
GrB_Info GB (_AaddB__lor_uint32)
(
GrB_Matrix C,
const int C_sparsity,          // target sparsity format of C
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // if true, use M's structure only
const bool Mask_comp,          // if true, use the complemented mask
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,           // if true, C's hyperlist equals M's
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces are allocated inside the template and released by GB_FREE_WORK
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Element-wise "multiply" (set intersection of patterns) with the LOR
// uint32_t operator, method 01; kernel body supplied by the meta include.
GrB_Info GB (_AemultB_01__lor_uint32)
(
GrB_Matrix C,
const int C_sparsity,          // target sparsity format of C
const int ewise_method,        // which emult sub-method to use
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Method 02 of element-wise multiply with the LOR uint32_t operator.  The
// flipxy path only matters for non-commutative operators (GB_BINOP_FLIP);
// LOR is commutative, so the generator normally emits the single-template
// branch below.
GrB_Info GB (_AemultB_02__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,             // if true, compute z = op(y,x) instead of op(x,y)
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 03 of element-wise multiply with the LOR uint32_t operator; kernel
// body supplied by the template include.
GrB_Info GB (_AemultB_03__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,            // sparse/hypersparse mask (required here)
const bool Mask_struct,        // if true, use M's structure only
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap variant of element-wise multiply with the LOR uint32_t operator;
// kernel body supplied by the template include.
GrB_Info GB (_AemultB_bitmap__lor_uint32)
(
GrB_Matrix C,
const int ewise_method,        // which emult sub-method to use
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every entry bij present in B, compute Cx [p] = lor (x, bij), i.e.
// 1 if either the bound scalar x or bij is nonzero, else 0.
GrB_Info GB (_bind1st__lor_uint32)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,     // the scalar x, of type uint32_t
const GB_void *Bx_input,    // B's values, of type uint32_t
const int8_t *restrict Bb,  // B's bitmap (may be NULL for full matrices)
int64_t bnz,                // number of entries to process
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Bx = (uint32_t *) Bx_input ;
uint32_t x = (*((uint32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present in the bitmap
if (GBB (Bb, p))
{
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = ((bij != 0) || (x != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For every entry aij present in A, compute Cx [p] = lor (aij, y), i.e.
// 1 if either aij or the bound scalar y is nonzero, else 0.
GrB_Info GB (_bind2nd__lor_uint32)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,    // A's values, of type uint32_t
const GB_void *y_input,     // the scalar y, of type uint32_t
const int8_t *restrict Ab,  // A's bitmap (may be NULL for full matrices)
int64_t anz,                // number of entries to process
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (GBB (Ab, p))
{
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = ((y != 0) || (aij != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: lor of the bound
// scalar x with each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,    // the scalar x, of type uint32_t
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the generated file (no effect at runtime)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: lor of each
// transposed entry with the bound scalar y.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,    // the scalar y, of type uint32_t
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
clause-1.c | /* { dg-do compile } */
/* { dg-require-effective-target tls } */
#define p parallel
extern void bar (void);
extern char q[];
int t;
#pragma omp threadprivate (t)
void
foo (int x)
{
char *p;
struct S { int i; int j; } s;
char a[32];
double d;
int i;
const int c = 8;
#pragma omp p shared (x, x) /* { dg-error "more than once" } */
;
#pragma omp p private (x) private (x) /* { dg-error "more than once" } */
;
#pragma omp p shared (x) firstprivate (x) /* { dg-error "more than once" } */
;
#pragma omp p firstprivate (x, x) /* { dg-error "more than once" } */
;
#pragma omp p for shared (x) lastprivate (x) /* { dg-error "more than" } */
for (i = 0; i < 10; i++)
;
#pragma omp p for private (x) lastprivate (x) /* { dg-error "more than" } */
for (i = 0; i < 10; i++)
;
#pragma omp p for lastprivate (x, x) /* { dg-error "more than once" } */
for (i = 0; i < 10; i++)
;
#pragma omp single private (x) copyprivate (x) /* { dg-error "more than" } */
;
#pragma omp p shared (bar) /* { dg-error "is not a variable" } */
;
#pragma omp p private (bar) /* { dg-error "is not a variable" } */
;
#pragma omp p firstprivate (bar) /* { dg-error "is not a variable" } */
;
#pragma omp p reduction (+:p) /* { dg-error "has invalid type for" } */
;
#pragma omp p reduction (*:s) /* { dg-error "has invalid type for" } */
;
#pragma omp p reduction (-:a) /* { dg-error "has invalid type for" } */
;
d = 0;
#pragma omp p reduction (*:d)
;
#pragma omp p reduction (|:d) /* { dg-error "has invalid type for" } */
;
#pragma omp p reduction (&&:d) /* { dg-error "has invalid type for" } */
;
#pragma omp p copyin (d) /* { dg-error "must be 'threadprivate'" } */
;
#pragma omp p copyin (x) /* { dg-error "must be 'threadprivate'" } */
;
#pragma omp p for firstprivate (x) lastprivate (x)
for (i = 0; i < 10; i++)
;
#pragma omp p private (q) /* { dg-error "incomplete type" } */
;
#pragma omp p firstprivate (q) /* { dg-error "incomplete type" } */
;
#pragma omp p for lastprivate (q) /* { dg-error "incomplete type" } */
for (i = 0; i < 10; i++)
;
#pragma omp p shared (t) /* { dg-error "predetermined 'threadprivate'" } */
;
#pragma omp p private (t) /* { dg-error "predetermined 'threadprivate'" } */
;
#pragma omp p firstprivate (t) /* { dg-error "predetermined 'threadpriv" } */
;
#pragma omp p for lastprivate (t) /* { dg-error "predetermined 'threadpr" } */
for (i = 0; i < 10; i++)
;
#pragma omp p reduction (*:t) /* { dg-error "predetermined 'threadprivate" } */
;
#pragma omp p shared (c) /* { dg-error "predetermined 'shared'" } */
;
#pragma omp p private (c) /* { dg-error "predetermined 'shared'" } */
;
#pragma omp p firstprivate (c) /* { dg-error "predetermined 'shared'" } */
;
#pragma omp p for lastprivate (c) /* { dg-error "predetermined 'shared'" } */
for (i = 0; i < 10; i++)
;
#pragma omp p reduction (*:c) /* { dg-error "predetermined 'shared'" } */
;
}
|
builder.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef BUILDER_H_
#define BUILDER_H_
#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>
#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: BuilderBase
Author: Scott Beamer
Given arguments from the command line (cli), returns a built graph
- MakeGraph() will parse cli and obtain edgelist and call
MakeGraphFromEL(edgelist) to perform actual graph construction
- edgelist can be from file (reader) or synthetically generated (generator)
- Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/
template <typename NodeID_, typename DestID_ = NodeID_,
typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
typedef EdgePair<NodeID_, DestID_> Edge;
typedef pvector<Edge> EdgeList;
const CLBase &cli_;
bool symmetrize_;
bool needs_weights_;
int64_t num_nodes_ = -1;
public:
explicit BuilderBase(const CLBase &cli) : cli_(cli) {
symmetrize_ = cli_.symmetrize();
needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
}
DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
return e.u;
}
DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
}
NodeID_ FindMaxNodeID(const EdgeList &el) {
NodeID_ max_seen = 0;
#pragma omp parallel for reduction(max : max_seen)
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
max_seen = std::max(max_seen, e.u);
max_seen = std::max(max_seen, (NodeID_) e.v);
}
return max_seen;
}
pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
pvector<NodeID_> degrees(num_nodes_, 0);
//exit(0);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
fetch_and_add(degrees[e.u], 1);
if (symmetrize_ || (!symmetrize_ && transpose))
fetch_and_add(degrees[(NodeID_) e.v], 1);
}
printf("Degrees capacity: %lu\n", degrees.capacity());
printf("Degrees size: %lu\n", degrees.size());
return degrees;
}
static
pvector<SGOffset> PrefixSum(const pvector<NodeID_> °rees) {
pvector<SGOffset> sums(degrees.size() + 1);
SGOffset total = 0;
for (size_t n=0; n < degrees.size(); n++) {
sums[n] = total;
total += degrees[n];
}
sums[degrees.size()] = total;
return sums;
}
static
pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> °rees) {
const size_t block_size = 1<<20;
const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
pvector<SGOffset> local_sums(num_blocks);
#pragma omp parallel for
for (size_t block=0; block < num_blocks; block++) {
SGOffset lsum = 0;
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i=block * block_size; i < block_end; i++)
lsum += degrees[i];
local_sums[block] = lsum;
}
pvector<SGOffset> bulk_prefix(num_blocks+1);
SGOffset total = 0;
for (size_t block=0; block < num_blocks; block++) {
bulk_prefix[block] = total;
total += local_sums[block];
}
bulk_prefix[num_blocks] = total;
pvector<SGOffset> prefix(degrees.size() + 1);
#pragma omp parallel for
for (size_t block=0; block < num_blocks; block++) {
SGOffset local_total = bulk_prefix[block];
size_t block_end = std::min((block + 1) * block_size, degrees.size());
for (size_t i=block * block_size; i < block_end; i++) {
prefix[i] = local_total;
local_total += degrees[i];
}
}
prefix[degrees.size()] = bulk_prefix[num_blocks];
return prefix;
}
// Removes self-loops and redundant edges
// Side effect: neighbor IDs will be sorted
void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
DestID_*** sq_index, DestID_** sq_neighs) {
pvector<NodeID_> diffs(g.num_nodes());
DestID_ *n_start, *n_end;
#pragma omp parallel for private(n_start, n_end)
for (NodeID_ n=0; n < g.num_nodes(); n++) {
if (transpose) {
n_start = g.in_neigh(n).begin();
n_end = g.in_neigh(n).end();
} else {
n_start = g.out_neigh(n).begin();
n_end = g.out_neigh(n).end();
}
std::sort(n_start, n_end);
DestID_ *new_end = std::unique(n_start, n_end);
new_end = std::remove(n_start, new_end, n);
diffs[n] = new_end - n_start;
}
pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
*sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
*sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
#pragma omp parallel for private(n_start)
for (NodeID_ n=0; n < g.num_nodes(); n++) {
if (transpose)
n_start = g.in_neigh(n).begin();
else
n_start = g.out_neigh(n).begin();
std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
}
}
CSRGraph<NodeID_, DestID_, invert> SquishGraph(
const CSRGraph<NodeID_, DestID_, invert> &g) {
DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
SquishCSR(g, false, &out_index, &out_neighs);
if (g.directed()) {
if (invert)
SquishCSR(g, true, &in_index, &in_neighs);
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
out_neighs, in_index,
in_neighs);
} else {
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
out_neighs);
}
}
/*
Graph Bulding Steps (for CSR):
- Read edgelist once to determine vertex degrees (CountDegrees)
- Determine vertex offsets by a prefix sum (ParallelPrefixSum)
- Allocate storage and set points according to offsets (GenIndex)
- Copy edges into storage
*/
void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
DestID_** neighs) {
pvector<NodeID_> degrees = CountDegrees(el, transpose);
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
*neighs = new DestID_[offsets[num_nodes_]];
*index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
#pragma omp parallel for
for (auto it = el.begin(); it < el.end(); it++) {
Edge e = *it;
if (symmetrize_ || (!symmetrize_ && !transpose))
(*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
if (symmetrize_ || (!symmetrize_ && transpose))
(*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
GetSource(e);
}
}
CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
DestID_ **index = nullptr, **inv_index = nullptr;
DestID_ *neighs = nullptr, *inv_neighs = nullptr;
Timer t;
t.Start();
if (num_nodes_ == -1)
num_nodes_ = FindMaxNodeID(el)+1;
if (needs_weights_)
Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
MakeCSR(el, false, &index, &neighs);
if (!symmetrize_ && invert) {
printf("new flag\n");
MakeCSR(el, true, &inv_index, &inv_neighs);
}
t.Stop();
PrintTime("Build Time", t.Seconds());
if (symmetrize_)
return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
else
return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
inv_index, inv_neighs);
}
CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
CSRGraph<NodeID_, DestID_, invert> g;
{ // extra scope to trigger earlier deletion of el (save memory)
EdgeList el;
if (cli_.filename() != "") {
Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
return r.ReadSerializedGraph();
} else {
el = r.ReadFile(needs_weights_);
}
} else if (cli_.scale() != -1) {
Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
el = gen.GenerateEL(cli_.uniform());
}
//for (auto wait : el)
//printf("PE %d | u = %lu v = %lu w = %lu\n", 0, wait.u, wait.v.v, wait.v.w);
g = MakeGraphFromEL(el);
//printf("EL Size: %lu | El[0]: (%d, %d)", el.size(), (*(el.begin())).u, (*(el.begin())).v);
}
return SquishGraph(g);
}
// Relabels (and rebuilds) graph by order of decreasing degree
static
CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
const CSRGraph<NodeID_, DestID_, invert> &g) {
if (g.directed()) {
std::cout << "Cannot relabel directed graph" << std::endl;
std::exit(-11);
}
Timer t;
t.Start();
typedef std::pair<int64_t, NodeID_> degree_node_p;
pvector<degree_node_p> degree_id_pairs(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++)
degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
std::greater<degree_node_p>());
pvector<NodeID_> degrees(g.num_nodes());
pvector<NodeID_> new_ids(g.num_nodes());
#pragma omp parallel for
for (NodeID_ n=0; n < g.num_nodes(); n++) {
degrees[n] = degree_id_pairs[n].first;
new_ids[degree_id_pairs[n].second] = n;
}
for (int i = 0; i < g.num_nodes(); i++)
printf("PE %d | degrees[%d] = %d, new_ids[%d] = %d\n", 0, i, degrees[i], i, new_ids[i]);
pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
#pragma omp parallel for
for (NodeID_ u=0; u < g.num_nodes(); u++) {
for (NodeID_ v : g.out_neigh(u))
neighs[offsets[new_ids[u]]++] = new_ids[v];
std::sort(index[new_ids[u]], index[new_ids[u]+1]);
}
t.Stop();
PrintTime("Relabel", t.Seconds());
return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
};
#endif // BUILDER_H_
|
GB_unaryop__ainv_uint8_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_int64
// op(A') function: GB_tran__ainv_uint8_int64
// C type: uint8_t
// A type: int64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// For each of the anz entries: typecast the int64_t input to uint8_t, then
// apply AINV (additive inverse, which wraps modulo 256 for uint8_t).
GrB_Info GB_unop__ainv_uint8_int64
(
uint8_t *Cx,       // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// expanded form of GB_CAST_OP (p, p):
int64_t aij = Ax [p] ;          // aij = Ax [p]
uint8_t cij = (uint8_t) aij ;   // typecast int64_t -> uint8_t
Cx [p] = -cij ;                 // additive inverse, stored modulo 256
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transposed variant of the kernel above: the transpose traversal and the
// application of GB_CAST_OP come from GB_unaryop_transpose.c (phase 2).
GrB_Info GB_tran__ainv_uint8_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts from phase 1
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice                         // number of parallel slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
IcgThreshold.c | // Copyright (C) 2016 Gernot Riegler
// Institute for Computer Graphics and Vision (ICG)
// Graz University of Technology (TU GRAZ)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. All advertising materials mentioning features or use of this software
// must display the following acknowledgement:
// This product includes software developed by the ICG, TU GRAZ.
// 4. Neither the name of the ICG, TU GRAZ nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IcgThreshold.c"
#else
// Forward pass: output[i] = val_greater if input[i] > threshold, else
// val_smaller.  Reads threshold/val_greater/val_smaller fields from the Lua
// module table (stack slot 1) and the input tensor from stack slot 2.
static int icgnn_(IcgThreshold_updateOutput)(lua_State *L) {
THTensor* input = luaT_checkudata(L, 2, torch_Tensor);
real threshold = luaT_getfieldchecknumber(L, 1, "threshold");
real val_greater = luaT_getfieldchecknumber(L, 1, "val_greater");
real val_smaller = luaT_getfieldchecknumber(L, 1, "val_smaller");
THTensor* output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
input = THTensor_(newContiguous)(input);
real* src = THTensor_(data)(input);
THTensor_(resizeAs)(output, input);
real* dst = THTensor_(data)(output);
long nelem = THTensor_(nElement)(input);
long i;
#pragma omp parallel for private(i)
for(i = 0; i < nelem; ++i) {
if (src[i] > threshold) {
dst[i] = val_greater;
} else {
dst[i] = val_smaller;
}
}
// release the contiguous copy made above
THTensor_(free)(input);
return 1;
}
// Backward pass: the thresholding op has zero gradient almost everywhere,
// so grad_input is resized to match input and filled with zeros.
static int icgnn_(IcgThreshold_updateGradInput)(lua_State *L) {
THTensor* input = luaT_checkudata(L, 2, torch_Tensor);
THTensor* grad_input = luaT_checkudata(L, 3, torch_Tensor);
THTensor_(resizeAs)(grad_input, input);
real* gdata = THTensor_(data)(grad_input);
long nelem = THTensor_(nElement)(input);
long i;
#pragma omp parallel for private(i)
for(i = 0; i < nelem; ++i) {
gdata[i] = 0;
}
return 1;
}
// Method table mapping the Lua-visible names to the C implementations above.
static const struct luaL_Reg icgnn_(IcgThreshold__) [] = {
{"IcgThreshold_updateOutput", icgnn_(IcgThreshold_updateOutput)},
{"IcgThreshold_updateGradInput", icgnn_(IcgThreshold_updateGradInput)},
{NULL, NULL}
};
// Registers the IcgThreshold methods under the "icgnn" name on the tensor
// metatable, then restores the Lua stack.
static void icgnn_(IcgThreshold_init)(lua_State *L) {
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, icgnn_(IcgThreshold__), "icgnn");
lua_pop(L,1);
}
#endif
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Example use of firstprivate()
*/
/* Adds the scalar g to each of the first n elements of a.  g is
 * firstprivate: every thread gets its own initialized copy, so the loop is
 * free of data races (the point of this DataRaceBench example). */
void foo(int * a, int n, int g)
{
int i;
#pragma omp parallel for firstprivate (g)
for (i = 0; i < n; i++)
{
a[i] += g;
}
}
// 100-element test array, zero-initialized (static storage duration).
int a[100];
// Driver: exercises the firstprivate example once.
int main()
{
foo(a, 100, 7);
return 0;
}
|
TomoP3DModelSino_core.c | /*
* Copyright 2017 Daniil Kazantsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TomoP3DModelSino_core.h"
#define M_PI 3.14159265358979323846
#define M_PI2 1.57079632679
#define EPS 0.000000001
#define MAXCHAR 1000
/* Function to create 3D analytical projection data (parallel beam geometry) for 3D models
*
* Input Parameters:
* - ModelNo - the model number from Phantom3DLibrary file
* - Horiz_det - the number of horizontal detectors
* - Vert_det - the number of vertical detectors
* - N - the size of the Phantom (N x N x N), currently Vert_det must be set to N
* - Theta_proj - a vector of projection angles in degrees
* - C0 - intensity
* - x0 - x0 position
* - y0 - y0 position
* - z0 - z0 position
* - a - size object
* - b - size object
* - c - size object
* - psi_gr1 - rotation angle1
*
* Output:
* 1. The analytical phantom size of [N1 x N2 x N3] or temporal 4D phantom (N1 x N2 x N3 x time-frames)
* Note: if Z1, Z2 indices are selected then the size can be [N1 x N2 x Z2-Z1]
*
*/
float TomoP3DObjectSino_core(float *A, long Horiz_det, long Vert_det, long Z1, long Z2, long N, float *Theta_proj, int AngTot, char *Object,
float C0, /* intensity */
float x0, /* x0 position */
float y0, /* y0 position */
float z0, /* z0 position */
float a, /* a - size object */
float b, /* b - size object */
float c, /* c - size object */
float psi_gr1, /* rotation angle1 */
float psi_gr2, /* rotation angle2 */
float psi_gr3, /* rotation angle3 */
long tt /*temporal index, 0 - for stationary */)
{
int ll;
long i, j, k, index, sub_vol_size;
float *DetectorRange_Horiz_ar=NULL, DetectorRange_Horiz_min, DetectorRange_Horiz_max, *DetectorRange_Vert_ar=NULL, DetectorRange_Vert_min, DetectorRange_Vert_max, U_step, V_step, a22, b22, c2, *Tomorange_Z_Ar = NULL, *Zdel = NULL;
float *AnglesRad=NULL;
float Tomorange_Xmin, Tomorange_Xmax, H_x, multiplier, x00, y00, z00, a2, b2;
DetectorRange_Horiz_max = (float)(Horiz_det)/(float)(N+1); /* horizontal detector range */
DetectorRange_Horiz_min = -DetectorRange_Horiz_max;
/* Here assuming that the size of the vertical detector array is always equal to Z-dim of the phantom */
DetectorRange_Vert_max = (float)(Vert_det)/(float)(N+1); /* vertical detector range */
DetectorRange_Vert_min = -DetectorRange_Vert_max;
DetectorRange_Horiz_ar = malloc(Horiz_det*sizeof(float)); /* horizontal array */
DetectorRange_Vert_ar = malloc(Vert_det*sizeof(float)); /* vertical array */
U_step = (DetectorRange_Horiz_max - DetectorRange_Horiz_min)/(float)(Horiz_det-1);
V_step = (DetectorRange_Vert_max - DetectorRange_Vert_min)/(float)(Vert_det-1);
for(i=0; i<Horiz_det; i++) {DetectorRange_Horiz_ar[i] = (DetectorRange_Horiz_max) - (float)i*U_step;}
for(i=0; i<Vert_det; i++) {DetectorRange_Vert_ar[i] = (DetectorRange_Vert_max) - (float)i*V_step;}
sub_vol_size = Z2 - Z1;
Tomorange_Xmin = -1.0f;
Tomorange_Xmax = 1.0f;
H_x = (Tomorange_Xmax - Tomorange_Xmin)/(float)(N);
Tomorange_Z_Ar = malloc(N * sizeof(float));
for (i = 0; i<N; i++) { Tomorange_Z_Ar[i] = Tomorange_Xmin + (float)i*H_x; }
Zdel = malloc(N * sizeof(float));
for (i = 0; i<N; i++) Zdel[i] = Tomorange_Z_Ar[i] - z0;
/* convert to radians */
AnglesRad = malloc(AngTot*sizeof(float));
for(ll=0; ll<AngTot; ll++) AnglesRad[ll] = (Theta_proj[ll])*((float)M_PI/180.0f);
float alog2 = logf(2.0f);
multiplier = (C0*(N/2.0f));
float RS = 1.0f;
/*Angles: TETA1, PSIs, FI1 ? */
float TETAs, TETA1, FIs, FI1, PSI1, PSIs;
float psi1, psi2, psi3;
psi_gr1 = psi_gr1 + 90.0f;
psi_gr2 = psi_gr2 + 90.0f;
psi1 = psi_gr1*((float)M_PI/180.0f);
psi2 = psi_gr2*((float)M_PI/180.0f);
psi3 = psi_gr3*((float)M_PI/180.0f);
float xwid=0.0f,ywid=0.0f,p00,ksi00,ksi1=0.0f; /* cuboid-related */
float p,ksi,C,S,A2,B2,FI,CF,SF,P0,TF,PC,QM,DEL,XSYC,QP,SS,x11=0.0f,y11=0.0f;
/* fix for centering */
x00 = x0 + 0.5f*H_x;
y00 = y0 - 0.5f*H_x;
z00 = z0 - 0.5f*H_x;
/* parameters of an object have been extracted, now run the building module */
/************************************************/
if (strcmp("gaussian",Object) == 0) {
a = 0.5f*a;
b = 0.5f*b;
c = 0.5f*c;
}
a22 = a*a;
b22 = b*b;
a2 = 1.0f/(a22);
b2 = 1.0f/(b22);
c2 = 1.0f/(c*c);
float xh[3] = {0.0f, 0.0f, 0.0f};
float xh1[3] = {0.0f, 0.0f, 0.0f};
float vh1[3] = {0.0f, 0.0f, 0.0f};
float aa[3] = {0.0f, 0.0f, 0.0f};
float aa1[3] = {0.0f, 0.0f, 0.0f};
float al[3] = {0.0f, 0.0f, 0.0f};
float ai[3][3] = {
{0.0f,0.0f,0.0f},
{0.0f,0.0f,0.0f},
{0.0f,0.0f,0.0f} };
float bs[3][3] = {
{0.0f,0.0f,0.0f},
{0.0f,0.0f,0.0f},
{0.0f,0.0f,0.0f} };
float bsai[3][3] = {
{0.0f,0.0f,0.0f},
{0.0f,0.0f,0.0f},
{0.0f,0.0f,0.0f} };
matrot3(bs,psi1,psi2,psi3); /* rotation of 3x3 matrix */
xh1[0] = x00; xh1[1] = y00; xh1[2] = z00;
matvet3(bs,xh1,xh); /* matrix-vector multiplication */
float a_v, b_v, c_v, d_v, p1, p2, alh, bth, gmh;
if (strcmp("cuboid",Object) == 0) {
/* the object is a cuboid */
x11 = 2.0f*x0;
y11 = 2.0f*y0 + 1.0f*H_x;
xwid = b;
ywid = a;
c2 = 0.5f*c;
if (psi3 < 0) {ksi1 = (float)M_PI + psi3;}
else ksi1 = psi3;
}
float AA5, sin_2, cos_2, delta1, delta_sq, first_dr, AA2, AA3, AA6;
AA5 = (N*C0*a*b);
/*printf("%s %f %f %f %f %f %f %f %f %f %f %ld\n", Object, C0, x0, y0, z0, a, b, c, psi_gr1, psi_gr2, psi_gr3, tt);*/
#pragma omp parallel for shared(A) private(index,k,j,ll,TETAs,FIs,PSIs,aa1,aa,FI1,TETA1,PSI1,ai,bsai,vh1,al,a_v,b_v,c_v,d_v,p1,p2,alh,bth,gmh,sin_2, cos_2,delta1,delta_sq,first_dr, AA2, AA3, AA6, p00,ksi00,p,ksi,C,S,A2,B2,FI,CF,SF,P0,TF,PC,QM,DEL,XSYC,QP,SS)
for(ll=0; ll<AngTot; ll++) {
TETAs = AnglesRad[ll]; /* the variable projection angle (AnglesRad) */
TETA1 = TETAs - M_PI2;
FIs = 0.0f; /* always zero for the fixed source? */
PSIs = 0.0f; /* always zero for the fixed source? */
aa1[0]=-RS*sinf(TETAs)*cosf(FIs);
aa1[1]=-RS*sinf(TETAs)*sinf(FIs);
aa1[2]=-RS*cosf(TETAs);
matvet3(bs,aa1,aa); /* matrix-vector multiplication */
/* calculation of inverse matrix */
FI1=-FIs;
TETA1=-TETA1;
PSI1=-PSIs;
matrot3(ai,PSI1,TETA1,FI1); /* rotation of 3x3 matrix */
/* A transform matrix from projection space to object space */
matmat3(bs,ai,bsai);
vh1[0]=0.0f;
/* the object is an ellipsoid */
for(j=0; j<Horiz_det; j++) {
// for(k=0; k<Vert_det; k++) {
for(k=Z1; k<Z2; k++) {
//index = tt*Vert_det*Horiz_det*AngTot + ll*Vert_det*Horiz_det + k*Horiz_det + j;
index = tt*Horiz_det*AngTot*sub_vol_size + ll*sub_vol_size*Horiz_det + (k - Z1)*Horiz_det + j;
vh1[2]=DetectorRange_Horiz_ar[j];
vh1[1]=DetectorRange_Vert_ar[k];
matvet3(bsai,vh1,al); /*matrix-vector multiplication */
if (strcmp("ellipsoid",Object) == 0) {
a_v = powf((aa[0]/a),2) + powf((aa[1]/b),2) + powf((aa[2]/c),2);
b_v = aa[0]*(al[0]-xh[0])*a2 + aa[1]*(al[1]-xh[1])*b2 + aa[2]*(al[2]-xh[2])*c2;
c_v = powf(((al[0]-xh[0])/a),2) + powf(((al[1]-xh[1])/b), 2) + powf(((al[2]-xh[2])/c),2) - 1.0f;
d_v = b_v*b_v - a_v*c_v;
if(d_v > 0) {
p1 = -(sqrtf(d_v)+b_v)/a_v;
p2 = (sqrtf(d_v)-b_v)/a_v;
A[index] += (p2-p1)*multiplier;
}
}
if (strcmp("paraboloid",Object) == 0) {
/* the object is a parabola Lambda = 1 */
a_v = powf((aa[0]/a),2) + powf((aa[1]/b),2) + powf((aa[2]/c),2);
b_v = aa[0]*(al[0]-xh[0])*a2 + aa[1]*(al[1]-xh[1])*b2 + aa[2]*(al[2]-xh[2])*c2;
c_v = powf(((al[0]-xh[0])/a),2) + powf(((al[1]-xh[1])/b), 2) + powf(((al[2]-xh[2])/c),2) - 1.0f;
d_v = b_v*b_v - a_v*c_v;
if(d_v > 0) {
p1 = -(sqrtf(d_v)+b_v)/a_v;
p2 = (sqrtf(d_v)-b_v)/a_v;
A[index] += multiplier*(a_v/3.0f*(pow(p1,3.0f) - pow(p2,3.0f)) + b_v*(pow(p1,2.0f) - pow(p2,2.0f)) + c_v*(p1-p2));
}
}
if (strcmp("gaussian",Object) == 0) {
/* The object is a volumetric gaussian */
alh=alog2*a2;
bth=alog2*b2;
gmh=alog2*c2;
a_v = 1.0f/(alh*powf((aa[0]),2) + bth*powf((aa[1]),2) + gmh*powf((aa[2]),2));
b_v = aa[0]*alh*(al[0]-xh[0]) + aa[1]*bth*(al[1]-xh[1]) + aa[2]*gmh*(al[2]-xh[2]);
c_v = alh*powf(((al[0]-xh[0])),2) + bth*powf(((al[1]-xh[1])),2) + gmh*powf(((al[2]-xh[2])),2);
A[index] += multiplier*sqrtf(M_PI*a_v)*expf((pow(b_v,2))*a_v-c_v);
}
}} /*main for j-k loop*/
if (strcmp("elliptical_cylinder",Object) == 0) {
sin_2 = powf((sinf(TETAs - psi3)),2);
cos_2 = powf((cosf(TETAs - psi3)),2);
delta1 = 1.0f/(a22*sin_2 + b22*cos_2);
delta_sq = sqrtf(delta1);
first_dr = AA5*delta_sq;
AA2 = -x00*sinf(TETAs) + y00*cosf(TETAs);
for(k=Z1; k<Z2; k++) {
if (fabs(Zdel[k]) < c) {
for(j=0; j<Horiz_det; j++) {
AA3 = powf((DetectorRange_Horiz_ar[j] - AA2),2);
AA6 = (AA3)*delta1;
//index = tt*Vert_det*Horiz_det*AngTot + ll*Vert_det*Horiz_det + k*Horiz_det + j;
index = tt*sub_vol_size*Horiz_det*AngTot + ll*sub_vol_size*Horiz_det + (k - Z1)*Horiz_det + j;
if (AA6 < 1.0f) A[index] += first_dr*sqrtf(1.0f - AA6);
}
}
}
}
if (strcmp("cuboid",Object) == 0) {
/* the object is a cuboid */
ksi00 = AnglesRad[(AngTot-1)-ll];
for(k=Z1; k<Z2; k++) {
if (fabs(Zdel[k]) < c2) {
for(j=0; j< Horiz_det; j++) {
p00 = DetectorRange_Horiz_ar[j];
index = tt*sub_vol_size*Horiz_det*AngTot + ll*sub_vol_size*Horiz_det + (k - Z1)*Horiz_det + j;
p = p00;
ksi=ksi00;
if (ksi > (float)M_PI) {
ksi = ksi - (float)M_PI;
p = -p00; }
C = cosf(ksi); S = sinf(ksi);
XSYC = -x11*S + y11*C;
A2 = xwid*0.5f;
B2 = ywid*0.5f;
if ((ksi - ksi1) < 0.0f) FI = (float)M_PI + ksi - ksi1;
else FI = ksi - ksi1;
if (FI > M_PI2) FI = (float)M_PI - FI;
CF = cosf(FI);
SF = sinf(FI);
P0 = fabs(p-XSYC);
SS = xwid/CF*C0;
if (fabs(CF) <= (float)EPS) {
SS = ywid*C0;
if ((P0 - A2) > (float)EPS) SS=0.0f;
}
if (fabs(SF) <= (float)EPS) {
SS = xwid*C0;
if ((P0 - B2) > (float)EPS) SS=0.0f;
}
TF = SF/CF;
PC = P0/CF;
QP = B2+A2*TF;
QM = QP+PC;
if (QM > ywid) {
DEL = P0+B2*CF;
SS = ywid/SF*C0;
if (DEL > (A2*SF)) SS = (QP-PC)/SF*C0;
}
if (QM > ywid) {
DEL = P0+B2*CF;
if (DEL > A2*SF) SS = (QP-PC)/SF*C0;
else SS = ywid/SF*C0;
}
else SS = xwid/CF*C0;
if (PC >= QP) SS=0.0f;
A[index] += (N/2.0f)*SS;
} /*j-loop*/
}
} /*k-loop*/
}
}
/************************************************/
free(AnglesRad);
free(DetectorRange_Horiz_ar);
free(DetectorRange_Vert_ar);
free(Tomorange_Z_Ar);
free(Zdel);
return *A;
}
/********************Core Function*****************************/
/* Parse the model-parameters file, locate the model selected by ModelSelected,
and accumulate its analytical 3D (steps == 1) or 4D temporal (steps > 1)
projection data into A by calling TomoP3DObjectSino_core once per object
component (and per time step for temporal models).
A : output projection (sinogram) array, updated in place
Horiz_det, Vert_det : detector dimensions; Z1..Z2 select a vertical slab
N : phantom grid size; Angl_vector / AngTot : projection angles (degrees)
ModelParametersFilename : path to the ASCII model library file
Returns A[0] (kept for compatibility with existing callers). */
float TomoP3DModelSino_core(float *A, int ModelSelected, long Horiz_det, long Vert_det, long Z1, long Z2, long N, float *Angl_vector, int AngTot, char* ModelParametersFilename)
{
int Model = 0, Components = 0, steps = 0, counter = 0, ii;
float C0 = 0.0f, x0 = 0.0f, y0 = 0.0f, z0 = 0.0f, a = 0.0f, b = 0.0f, c = 0.0f, psi_gr1 = 0.0f, psi_gr2 = 0.0f, psi_gr3 = 0.0f;
FILE *fp = fopen(ModelParametersFilename, "r"); // read parameters file
if (fp == NULL) {
printf("%s \n", "Cannot open the file");
/* BUGFIX: return early here; the previous code fell through to
fclose(fp) with fp == NULL, which is undefined behavior.
A is left untouched. */
return *A;
}
else {
char str[MAXCHAR];
char tmpstr1[16];
char tmpstr2[22];
char tmpstr3[16];
char tmpstr4[16];
char tmpstr5[16];
char tmpstr6[16];
char tmpstr7[16];
char tmpstr8[16];
char tmpstr9[16];
char tmpstr10[16];
char tmpstr11[16];
char tmpstr12[16];
while (fgets(str, MAXCHAR, fp) != NULL)
{
/* work with non-# commented lines */
if (str[0] != '#') {
sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
if (strcmp(tmpstr1, "Model") == 0)
{
Model = atoi(tmpstr2);
if ((ModelSelected == Model) && (counter == 0)) {
/* check if we have a right model */
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
else {
break;
}
if (strcmp(tmpstr1, "Components") == 0) Components = atoi(tmpstr2);
//printf("%s %i\n", "Components:", Components);
if (Components <= 0) {
printf("%s %i\n", "Components cannot be negative, the given value is", Components);
break;
}
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
else {
break;
}
if (strcmp(tmpstr1, "TimeSteps") == 0) steps = atoi(tmpstr2);
if (steps <= 0) {
printf("%s %i\n", "TimeSteps cannot be negative, the given value is", steps);
break;
}
//printf("%s %i\n", "TimeSteps:", steps);
if (steps == 1) {
/**************************************************/
//printf("\n %s %i %s \n", "Stationary 3D model", ModelSelected, " is selected");
/* loop over all components */
for (ii = 0; ii<Components; ii++) {
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21s %15s %15s %15s %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8, tmpstr9, tmpstr10, tmpstr11, tmpstr12);
else {
break;
}
if (strcmp(tmpstr1, "Object") == 0) {
C0 = (float)atof(tmpstr3); /* intensity */
x0 = (float)atof(tmpstr4); /* x0 position */
y0 = (float)atof(tmpstr5); /* y0 position */
z0 = (float)atof(tmpstr6); /* z0 position */
a = (float)atof(tmpstr7); /* a - size object */
b = (float)atof(tmpstr8); /* b - size object */
c = (float)atof(tmpstr9); /* c - size object */
psi_gr1 = (float)atof(tmpstr10); /* rotation angle 1*/
psi_gr2 = (float)atof(tmpstr11); /* rotation angle 2*/
psi_gr3 = (float)atof(tmpstr12); /* rotation angle 3*/
}
else {
break;
}
// printf("\nObject : %s \nC0 : %f \nx0 : %f \ny0 : %f \nz0 : %f \na : %f \nb : %f \nc : %f \n", tmpstr2, C0, x0, y0, z0, a, b, c);
/* axes/signs are permuted per object class to match the Python wrapper's
coordinate convention */
if ((strcmp("gaussian",tmpstr2) == 0) || (strcmp("paraboloid",tmpstr2) == 0) || (strcmp("ellipsoid",tmpstr2) == 0)) {
TomoP3DObjectSino_core(A, Horiz_det, Vert_det, Z1, Z2, N, Angl_vector, AngTot, tmpstr2, C0, y0, -z0, -x0, b, a, c, psi_gr3, psi_gr2, psi_gr1, 0l); //python
}
else if (strcmp("elliptical_cylinder",tmpstr2) == 0) {
TomoP3DObjectSino_core(A, Horiz_det, Vert_det, Z1, Z2, N, Angl_vector, AngTot, tmpstr2, C0, x0, -y0, z0, b, a, c, psi_gr3, psi_gr2, psi_gr1, 0l); //python
}
else {
TomoP3DObjectSino_core(A, Horiz_det, Vert_det, Z1, Z2, N, Angl_vector, AngTot, tmpstr2, C0, x0, y0, z0, a, b, c, psi_gr3, psi_gr2, -psi_gr1, 0l); //python
}
}
}
else {
/**************************************************/
//printf("\n %s \n", "Temporal model is selected");
/* temporal phantom 3D + time (4D) */
float C1 = 0.0f, x1 = 0.0f, y1 = 0.0f, z1 = 0.0f, a1 = 0.0f, b1 = 0.0f, c1 = 0.0f, psi_gr1_1 = 0.0f, psi_gr2_1 = 0.0f, psi_gr3_1 = 0.0f;
/* loop over all components */
for (ii = 0; ii<Components; ii++) {
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21s %15s %15s %15s %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8, tmpstr9, tmpstr10, tmpstr11, tmpstr12);
else {
break;
}
if (strcmp(tmpstr1, "Object") == 0) {
C0 = (float)atof(tmpstr3); /* intensity */
x0 = (float)atof(tmpstr4); /* x0 position */
y0 = (float)atof(tmpstr5); /* y0 position */
z0 = (float)atof(tmpstr6); /* z0 position */
a = (float)atof(tmpstr7); /* a - size object */
b = (float)atof(tmpstr8); /* b - size object */
c = (float)atof(tmpstr9); /* c - size object */
psi_gr1 = (float)atof(tmpstr10); /* rotation angle 1*/
psi_gr2 = (float)atof(tmpstr11); /* rotation angle 2*/
psi_gr3 = (float)atof(tmpstr12); /* rotation angle 3*/
}
else {
break;
}
// printf("\nObject : %s \nC0 : %f \nx0 : %f \ny0 : %f \nz0 : %f \na : %f \nb : %f \n", tmpstr2, C0, x0, y0, z0, a, b, c);
/* check Endvar related parameters */
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %15s %15s %15s %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8, tmpstr9, tmpstr10, tmpstr11, tmpstr12);
else break;
if (strcmp(tmpstr1, "Endvar") == 0) {
C1 = (float)atof(tmpstr3); /* intensity */
x1 = (float)atof(tmpstr4); /* x1 position */
y1 = (float)atof(tmpstr5); /* y1 position */
z1 = (float)atof(tmpstr6); /* z1 position */
a1 = (float)atof(tmpstr7); /* a - size object */
b1 = (float)atof(tmpstr8); /* b - size object */
c1 = (float)atof(tmpstr9); /* c - size object */
psi_gr1_1 = (float)atof(tmpstr10); /* rotation angle 1*/
psi_gr2_1 = (float)atof(tmpstr11); /* rotation angle 2*/
psi_gr3_1 = (float)atof(tmpstr12); /* rotation angle 3*/
}
else {
printf("%s\n", "Cannot find 'Endvar' string in parameters file");
break;
}
//printf("\nObject : %s \nC0 : %f \nx0 : %f \ny0 : %f \nz0 : %f \na : %f \nb : %f \nc : %f \n", tmpstr2, C0, x0, y0, z0, a1, b1, c1);
/*now we know the initial parameters of the object and the final ones. We linearly extrapolate to establish steps and coordinates. */
/* calculating the full distance between the start and the end points */
float distance = sqrtf(pow((x1 - x0), 2) + pow((y1 - y0), 2) + pow((z1 - z0), 2));
/* steps >= 2 in this branch (steps == 1 handled above), so steps-1 > 0 */
float d_dist = distance / (steps - 1); /*a step over line */
float C_step = (C1 - C0) / (steps - 1);
float a_step = (a1 - a) / (steps - 1);
float b_step = (b1 - b) / (steps - 1);
float c_step = (c1 - c) / (steps - 1);
float phi_rot_step1 = (psi_gr1_1 - psi_gr1) / (steps - 1);
float phi_rot_step2 = (psi_gr2_1 - psi_gr2) / (steps - 1);
float phi_rot_step3 = (psi_gr3_1 - psi_gr3) / (steps - 1);
long tt;
float x_t, y_t, z_t, a_t, b_t, c_t, C_t, phi1_t, phi2_t, phi3_t, d_step;
/* initialize */
x_t = x0; y_t = y0; z_t = z0; a_t = a; b_t = b; c_t = c; C_t = C0; phi1_t = psi_gr1; phi2_t = psi_gr2; phi3_t = psi_gr3; d_step = d_dist;
/*loop over time frames*/
for (tt = 0; tt < (long)steps; tt++) {
if ((strcmp("gaussian",tmpstr2) == 0) || (strcmp("paraboloid",tmpstr2) == 0) || (strcmp("ellipsoid",tmpstr2) == 0)) {
TomoP3DObjectSino_core(A, Horiz_det, Vert_det, Z1, Z2, N, Angl_vector, AngTot, tmpstr2, C_t, y_t, -z_t, -x_t, b_t, a_t, c_t, phi3_t, phi2_t, phi1_t, tt); //python
}
else if (strcmp("elliptical_cylinder",tmpstr2) == 0) {
TomoP3DObjectSino_core(A, Horiz_det, Vert_det, Z1, Z2, N, Angl_vector, AngTot, tmpstr2, C_t, x_t, -y_t, z_t, b_t, a_t, c_t, phi3_t, phi2_t, phi1_t, tt); //python
}
else {
TomoP3DObjectSino_core(A, Horiz_det, Vert_det, Z1, Z2, N, Angl_vector, AngTot, tmpstr2, C_t, x_t, y_t, z_t, a_t, b_t, c_t, phi3_t, phi2_t, -phi1_t, tt); //python
}
/* calculating new coordinates of an object */
if (distance != 0.0f) {
float t = d_step / distance;
x_t = (1 - t)*x0 + t*x1;
y_t = (1 - t)*y0 + t*y1;
z_t = (1 - t)*z0 + t*z1;
}
else {
x_t = x0;
y_t = y0;
z_t = z0;
}
d_step += d_dist;
a_t += a_step;
b_t += b_step;
c_t += c_step;
C_t += C_step;
phi1_t += phi_rot_step1;
phi2_t += phi_rot_step2;
phi3_t += phi_rot_step3;
} /*time steps*/
} /*components loop*/
}
counter++;
}
}
}
}
}
fclose(fp);
return *A;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial: returns n! (with 0! == 1). Used to build Binomial
kernel coefficients; overflows size_t for large n, which is acceptable
for the small kernel orders used here. */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  size_t k;

  for (k = 2; k <= n; k++)
    result *= k;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walk a singly-linked kernel list and return its final node.
Caller must pass a non-NULL head. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *node;

  for (node = kernel; node->next != (KernelInfo *) NULL; node = node->next)
    ;
  return(node);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shape the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo method
% when you are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/* Parse one user-defined kernel array from kernel_string into a freshly
allocated KernelInfo. Two forms are accepted:
"WxH[+X+Y][@><]:v,v,v,..." - new style with explicit geometry, and
"v,v,v,..." - old style, implying an odd-sized square kernel.
Values "nan" or "-" mark cells excluded from the neighbourhood.
Returns NULL on allocation failure or malformed input (the partially
built kernel is destroyed first); otherwise the caller owns the result
and must free it with DestroyKernelInfo(). */
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MaxTextExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
/* NULL input yields an empty UserDefined kernel, not an error */
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
/* NOTE(review): assumes the geometry prefix fits in MaxTextExtent;
a longer prefix would overflow token - confirm upstream callers */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
/* default origin is the (top-left biased) center of the kernel */
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
/* rewind and re-scan to actually read the values */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* seed min/max with sentinels so the first real value replaces them */
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
(void) GetNextToken(p,&p,MaxTextExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we received at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
/* expand a single kernel into a rotated list when flagged */
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
/* Parse a built-in 'named' kernel specification of the form
"name[:geometry-args]" (e.g. "Disk:3"). The name is matched against
MagickKernelOptions; per-kernel-type defaults are supplied for any
geometry fields the user omitted, then AcquireKernelBuiltIn() builds
the kernel. Returns NULL when the name is unknown or user-defined.
Caller owns the result (DestroyKernelInfo). */
static KernelInfo *ParseKernelName(const char *kernel_string)
{
char
token[MaxTextExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
(void) GetNextToken(kernel_string,&p,MaxTextExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
/* skip separators between the name and its arguments */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel definition */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
/* NOTE(review): assumes the argument span fits in MaxTextExtent;
confirm callers cannot supply a longer definition */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
/* AcquireKernelInfo() converts a user-supplied kernel string (or "@file"
reference) into a linked list of KernelInfo structures. Each ';'-separated
entry is parsed either as a named built-in kernel (leading alphabetic
token) or as a user-defined kernel array. Returns NULL on any parse or
allocation failure; otherwise the caller owns the list and must free it
with DestroyKernelInfo(). */
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MaxTextExtent];
const char
*p;
/* NULL input: delegate so an empty UserDefined kernel is returned */
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
/* "@filename" - read the kernel definition from a file */
ExceptionInfo *exception=AcquireExceptionInfo();
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
exception=DestroyExceptionInfo(exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
/* tokens starting with alpha is a Named kernel */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
/* BUGFIX: release the file buffer too; the previous code leaked
kernel_cache on this error path when "@file" input was used */
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process, which uses a set of 9 specially weighted kernels. These 9
% kernels are not normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* copy values in structure */
  /* BUGFIX: the structure copy above also copied the 'next' pointer, which
  ** until replaced below aliases the ORIGINAL kernel list.  Clear it now so
  ** that a failure-path DestroyKernelInfo(new_kernel) cannot recurse into
  ** (and free) kernels still owned by the caller.
  */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if (kernel->next != (KernelInfo *) NULL)
    {
      new_kernel->next=CloneKernelInfo(kernel->next);
      if (new_kernel->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(new_kernel));
    }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Walk the list iteratively rather than recursing: expanded kernel lists
  ** (e.g. those built by ExpandRotateKernelInfo / ExpandMirrorKernelInfo or
  ** compound kernel strings) can be long, and the original recursion used
  ** one stack frame per kernel in the list.
  */
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  /* always returns NULL, for the 'kernel=DestroyKernelInfo(kernel);' idiom */
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180-degree
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* Disabled (dead) code: horizontally mirror (Flop) a single kernel by
** reversing each row of values and mirroring the origin's x offset.
** NOTE(review): as written this will NOT compile if re-enabled -- the final
** statement references 'angle', which is declared nowhere in this function;
** presumably it was once a parameter or a kernel field.  Also note
** 'x<kernel->width/2' compares a signed index against an unsigned width --
** verify both points before removing the '#if 0'.
*/
static void FlopKernelInfo(KernelInfo *kernel)
    { /* Do a Flop by reversing each row. */
      size_t
        y;
      register ssize_t
        x,r;
      register double
        *k,t;

      for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
        for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
          t=k[x],  k[x]=k[r],  k[r]=t;

      kernel->x = kernel->width - kernel->x - 1;
      angle = fmod(angle+180.0, 360.0);
    }
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /* Expand a single kernel into its mirror sequence by appending a rotated
  ** clone of the current list tail at each step: 180 (flip), then 90
  ** (transpose), then 180 again (flop).  On any clone failure the list is
  ** simply left partially expanded.
  */
  static const double
    rotations[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *mirror,
    *tail;

  size_t
    j;

  tail=kernel;
  for (j=0; j < 3; j++)
  {
    mirror=CloneKernelInfo(tail);
    if (mirror == (KernelInfo *) NULL)
      return;  /* out of memory: stop expanding, as before */
    RotateKernelInfo(mirror,rotations[j]);
    LastKernelInfo(tail)->next=mirror;
    tail=mirror;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels,
% while 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel,double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  /* Return MagickTrue when both kernels have identical geometry, origin,
  ** and values; a NaN entry is considered equal only to another NaN entry.
  */
  register size_t
    n;

  size_t
    count;

  /* geometry and origin must agree */
  if (kernel1->width != kernel2->width)
    return(MagickFalse);
  if (kernel1->height != kernel2->height)
    return(MagickFalse);
  if (kernel1->x != kernel2->x)
    return(MagickFalse);
  if (kernel1->y != kernel2->y)
    return(MagickFalse);
  /* every value must agree, with NaN matching only NaN */
  count=kernel1->width*kernel1->height;
  for (n=0; n < count; n++)
  {
    if ( IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n]) )
      return(MagickFalse);
    if ( IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n]) )
      return(MagickFalse);
    /* NaN vs NaN falls through here: NaN>=epsilon is false, so they match */
    if ( fabs(kernel1->values[n]-kernel2->values[n]) >= MagickEpsilon )
      return(MagickFalse);
  }
  return(MagickTrue);
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /* Keep appending rotated clones to the kernel list until a rotation
  ** reproduces the original kernel, at which point the cycle is complete
  ** and the final (duplicate) clone is discarded.
  */
  tail=kernel;
  DisableMSCWarning(4127)
  while (1) {
  RestoreMSCWarning
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      return;                      /* allocation failed - list unchanged */
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;                       /* full circle - stop expanding */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  rotated=DestroyKernelInfo(rotated);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  double
    value;

  /* Reset the ranges, then accumulate them from the (cleaned) values. */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    value = kernel->values[i];
    /* Snap near-zero values to an exact zero before accumulating. */
    if ( fabs(value) < MagickEpsilon )
      kernel->values[i] = value = 0.0;
    if ( value < 0 )
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function, to actually perform a specific task without possible interference
% by any API user supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *p_view,
    *q_view;

  register ssize_t
    i;

  size_t
    *changes,
    changed,
    virt_width;

  ssize_t
    y,
    offx,
    offy;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(result_image != (Image *) NULL);
  assert(result_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  status=MagickTrue;
  progress=0;

  p_view=AcquireVirtualCacheView(image,exception);
  q_view=AcquireAuthenticCacheView(result_image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
      /* kernel is used as is, without reflection */
      break;
    default:
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
  }

  /* Per-thread change counters, summed after the parallel loops. */
  changed=0;
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changes[i]=0;

  if ( method == ConvolveMorphology && kernel->width == 1 )
    { /* Special handling (for speed) of vertical (blur) kernels.
      ** This performs its handling in columns rather than in rows.
      ** This is only done for convolve as it is the only method that
      ** generates very large 1-D vertical kernels (such as a 'BlurKernel')
      **
      ** Timing tests (on single CPU laptop)
      ** Using a vertical 1-d Blur with normal row-by-row (below)
      **   time convert logo: -morphology Convolve Blur:0x10+90 null:
      **     0.807u
      ** Using this column method
      **   time convert logo: -morphology Convolve Blur:0x10+90 null:
      **     0.620u
      **
      ** Anthony Thyssen, 14 June 2010
      */
      register ssize_t
        x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,result_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const PixelPacket
          *magick_restrict p;

        register const IndexPacket
          *magick_restrict p_indexes;

        register PixelPacket
          *magick_restrict q;

        register IndexPacket
          *magick_restrict q_indexes;

        register ssize_t
          y;

        ssize_t
          r;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
          exception);
        q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        p_indexes=GetCacheViewVirtualIndexQueue(p_view);
        q_indexes=GetCacheViewAuthenticIndexQueue(q_view);

        /* offset to origin in 'p'. while 'q' points to it directly */
        r = offy;

        for (y=0; y < (ssize_t) image->rows; y++)
        {
          DoublePixelPacket
            result;

          register ssize_t
            v;

          register const double
            *magick_restrict k;

          register const PixelPacket
            *magick_restrict k_pixels;

          register const IndexPacket
            *magick_restrict k_indexes;

          /* Copy input image to the output image for unused channels
          * This removes need for 'cloning' a new image every iteration
          */
          *q = p[r];
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r));

          /* Set the bias of the weighted average output */
          result.red =
          result.green =
          result.blue =
          result.opacity =
          result.index = bias;

          /* Weighted Average of pixels using reflected kernel
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values needs to be reversed.
          */
          k = &kernel->values[ kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+y;
          if ( ((channel & SyncChannels) == 0 ) ||
               (image->matte == MagickFalse) )
            { /* No 'Sync' involved.
              ** Convolution is simple greyscale channel operation
              */
              for (v=0; v < (ssize_t) kernel->height; v++) {
                if ( IsNaN(*k) ) continue;
                result.red += (*k)*GetPixelRed(k_pixels);
                result.green += (*k)*GetPixelGreen(k_pixels);
                result.blue += (*k)*GetPixelBlue(k_pixels);
                result.opacity += (*k)*GetPixelOpacity(k_pixels);
                if ( image->colorspace == CMYKColorspace)
                  result.index += (*k)*(*k_indexes);
                k--;
                k_pixels++;
                k_indexes++;
              }
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(result.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(result.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(result.blue));
              if (((channel & OpacityChannel) != 0) &&
                  (image->matte != MagickFalse))
                SetPixelOpacity(q,ClampToQuantum(result.opacity));
              if (((channel & IndexChannel) != 0) &&
                  (image->colorspace == CMYKColorspace))
                SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
            }
          else
            { /* Channel 'Sync' Flag, and Alpha Channel enabled.
              ** Weight the color channels with Alpha Channel so that
              ** transparent pixels are not part of the results.
              */
              double
                gamma; /* divisor, sum of color alpha weighting */

              MagickRealType
                alpha; /* alpha weighting for colors : alpha */

              size_t
                count; /* alpha values collected, number kernel values */

              count=0;
              gamma=0.0;
              for (v=0; v < (ssize_t) kernel->height; v++) {
                if ( IsNaN(*k) ) continue;
                alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
                count++; /* number of alpha values collected */
                alpha*=(*k); /* include kernel weighting now */
                gamma += alpha; /* normalize alpha weights only */
                result.red += alpha*GetPixelRed(k_pixels);
                result.green += alpha*GetPixelGreen(k_pixels);
                result.blue += alpha*GetPixelBlue(k_pixels);
                result.opacity += (*k)*GetPixelOpacity(k_pixels);
                if ( image->colorspace == CMYKColorspace)
                  result.index += alpha*(*k_indexes);
                k--;
                k_pixels++;
                k_indexes++;
              }
              /* Sync'ed channels, all channels are modified */
              gamma=PerceptibleReciprocal(gamma);
              if (count != 0)
                gamma*=(double) kernel->height/count;
              SetPixelRed(q,ClampToQuantum(gamma*result.red));
              SetPixelGreen(q,ClampToQuantum(gamma*result.green));
              SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
            }

          /* Count up changed pixels */
          if ( ( p[r].red != GetPixelRed(q))
              || ( p[r].green != GetPixelGreen(q))
              || ( p[r].blue != GetPixelBlue(q))
              || ( (image->matte != MagickFalse) &&
                  (p[r].opacity != GetPixelOpacity(q)))
              || ( (image->colorspace == CMYKColorspace) &&
                  (GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) )
            changes[id]++;
          p++;
          q++;
        } /* y */
        if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /* NOTE(review): progress is compared against image->rows here
            ** although this loop iterates over columns -- verify intended */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      } /* x */
      result_image->type=image->type;
      q_view=DestroyCacheView(q_view);
      p_view=DestroyCacheView(p_view);
      for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
        changed+=changes[i];
      changes=(size_t *) RelinquishMagickMemory(changes);
      /* NOTE(review): this column path reports 0 on failure while the
      ** row-by-row path below reports -1 -- confirm callers treat both
      ** the same way */
      return(status ? (ssize_t) changed : 0);
    }

  /*
  ** Normal handling of horizontal or rectangular kernels (row by row)
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,result_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    size_t
      r;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
      kernel->height, exception);
    q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    p_indexes=GetCacheViewVirtualIndexQueue(p_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(q_view);

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      DoublePixelPacket
        result,
        min,
        max;

      /* Copy input image to the output image for unused channels
      * This removes need for 'cloning' a new image every iteration
      */
      *q = p[r];
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r));

      /* Defaults */
      min.red =
      min.green =
      min.blue =
      min.opacity =
      min.index = (double) QuantumRange;
      max.red =
      max.green =
      max.blue =
      max.opacity =
      max.index = 0.0;
      /* default result is the original pixel value */
      result.red = (double) p[r].red;
      result.green = (double) p[r].green;
      result.blue = (double) p[r].blue;
      result.opacity = QuantumRange - (double) p[r].opacity;
      result.index = 0.0;
      if ( image->colorspace == CMYKColorspace)
        result.index = (double) GetPixelIndex(p_indexes+x+r);

      switch (method) {
        case ConvolveMorphology:
          /* Set the bias of the weighted average output */
          result.red =
          result.green =
          result.blue =
          result.opacity =
          result.index = bias;
          break;
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          /* use a boolean flag indicating when first match found */
          result.red = 0.0; /* result is not used otherwise */
          break;
        default:
          break;
      }

      switch ( method ) {
        case ConvolveMorphology:
          /* Weighted Average of pixels using reflected kernel
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values needs to be reversed.
          **
          ** Correlation is actually the same as this but without reflecting
          ** the kernel, and thus 'lower-level' that Convolution. However
          ** as Convolution is the more common method used, and it does not
          ** really cost us much in terms of processing to use a reflected
          ** kernel, so it is Convolution that is implemented.
          **
          ** Correlation will have its kernel reflected before calling
          ** this function to do a Convolve.
          **
          ** For more details of Correlation vs Convolution see
          **   http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          if ( ((channel & SyncChannels) == 0 ) ||
               (image->matte == MagickFalse) )
            { /* No 'Sync' involved.
              ** Convolution is simple greyscale channel operation
              */
              for (v=0; v < (ssize_t) kernel->height; v++) {
                for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                  if ( IsNaN(*k) ) continue;
                  result.red += (*k)*k_pixels[u].red;
                  result.green += (*k)*k_pixels[u].green;
                  result.blue += (*k)*k_pixels[u].blue;
                  result.opacity += (*k)*k_pixels[u].opacity;
                  if ( image->colorspace == CMYKColorspace)
                    result.index += (*k)*GetPixelIndex(k_indexes+u);
                }
                k_pixels += virt_width;
                k_indexes += virt_width;
              }
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
              if (((channel & OpacityChannel) != 0) &&
                  (image->matte != MagickFalse))
                SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity));
              if (((channel & IndexChannel) != 0) &&
                  (image->colorspace == CMYKColorspace))
                SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
            }
          else
            { /* Channel 'Sync' Flag, and Alpha Channel enabled.
              ** Weight the color channels with Alpha Channel so that
              ** transparent pixels are not part of the results.
              */
              double
                alpha, /* alpha weighting for colors : alpha */
                gamma; /* divisor, sum of color alpha weighting */

              size_t
                count; /* alpha values collected, number kernel values */

              count=0;
              gamma=0.0;
              for (v=0; v < (ssize_t) kernel->height; v++) {
                for (u=0; u < (ssize_t) kernel->width; u++, k--) {
                  if ( IsNaN(*k) ) continue;
                  alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
                  count++; /* number of alpha values collected */
                  alpha*=(*k); /* include kernel weighting now */
                  gamma += alpha; /* normalize alpha weights only */
                  result.red += alpha*k_pixels[u].red;
                  result.green += alpha*k_pixels[u].green;
                  result.blue += alpha*k_pixels[u].blue;
                  result.opacity += (*k)*k_pixels[u].opacity;
                  if ( image->colorspace == CMYKColorspace)
                    result.index+=alpha*GetPixelIndex(k_indexes+u);
                }
                k_pixels += virt_width;
                k_indexes += virt_width;
              }
              /* Sync'ed channels, all channels are modified */
              gamma=PerceptibleReciprocal(gamma);
              if (count != 0)
                gamma*=(double) kernel->height*kernel->width/count;
              SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue)));
              SetPixelOpacity(q,ClampToQuantum(result.opacity));
              if (image->colorspace == CMYKColorspace)
                SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma*
                  result.index)));
            }
          break;

        case ErodeMorphology:
          /* Minimum Value within kernel neighbourhood
          **
          ** NOTE that the kernel is not reflected for this operation!
          **
          ** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value, this is currently not done, due to
          ** the nature of the boolean kernels being used.
          */
          k = kernel->values;
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k++) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue;
              Minimize(min.red, (double) k_pixels[u].red);
              Minimize(min.green, (double) k_pixels[u].green);
              Minimize(min.blue, (double) k_pixels[u].blue);
              Minimize(min.opacity,
                          QuantumRange-(double) k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case DilateMorphology:
          /* Maximum Value within kernel neighbourhood
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values needs to be reversed.
          **
          ** NOTE: in normal Greyscale Morphology, the kernel value should
          ** be added to the real value, this is currently not done, due to
          ** the nature of the boolean kernels being used.
          **
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue;
              Maximize(max.red, (double) k_pixels[u].red);
              Maximize(max.green, (double) k_pixels[u].green);
              Maximize(max.blue, (double) k_pixels[u].blue);
              Maximize(max.opacity,
                          QuantumRange-(double) k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Maximize(max.index, (double) GetPixelIndex(
                  k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case HitAndMissMorphology:
        case ThinningMorphology:
        case ThickenMorphology:
          /* Minimum of Foreground Pixel minus Maximum of Background Pixels
          **
          ** NOTE that the kernel is not reflected for this operation,
          ** and consists of both foreground and background pixel
          ** neighbourhoods, 0.0 for background, and 1.0 for foreground
          ** with either Nan or 0.5 values for don't care.
          **
          ** Note that this will never produce a meaningless negative
          ** result.  Such results can cause Thinning/Thicken to not work
          ** correctly when used against a greyscale image.
          */
          k = kernel->values;
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k++) {
              if ( IsNaN(*k) ) continue;
              if ( (*k) > 0.7 )
                { /* minimum of foreground pixels */
                  Minimize(min.red, (double) k_pixels[u].red);
                  Minimize(min.green, (double) k_pixels[u].green);
                  Minimize(min.blue, (double) k_pixels[u].blue);
                  Minimize(min.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Minimize(min.index,(double) GetPixelIndex(
                      k_indexes+u));
                }
              else if ( (*k) < 0.3 )
                { /* maximum of background pixels */
                  Maximize(max.red, (double) k_pixels[u].red);
                  Maximize(max.green, (double) k_pixels[u].green);
                  Maximize(max.blue, (double) k_pixels[u].blue);
                  Maximize(max.opacity,
                              QuantumRange-(double) k_pixels[u].opacity);
                  if ( image->colorspace == CMYKColorspace)
                    Maximize(max.index, (double) GetPixelIndex(
                      k_indexes+u));
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* Pattern Match if difference is positive */
          min.red -= max.red; Maximize( min.red, 0.0 );
          min.green -= max.green; Maximize( min.green, 0.0 );
          min.blue -= max.blue; Maximize( min.blue, 0.0 );
          min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
          min.index -= max.index; Maximize( min.index, 0.0 );
          break;

        case ErodeIntensityMorphology:
          /* Select Pixel with Minimum Intensity within kernel neighbourhood
          **
          ** WARNING: the intensity test fails for CMYK and does not
          ** take into account the moderating effect of the alpha channel
          ** on the intensity.
          **
          ** NOTE that the kernel is not reflected for this operation!
          */
          k = kernel->values;
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k++) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue;
              if ( result.red == 0.0 ||
                   GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) {
                /* copy the whole pixel - no channel selection */
                *q = k_pixels[u];
                if ( result.red > 0.0 ) changes[id]++;
                result.red = 1.0;
              }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case DilateIntensityMorphology:
          /* Select Pixel with Maximum Intensity within kernel neighbourhood
          **
          ** WARNING: the intensity test fails for CMYK and does not
          ** take into account the moderating effect of the alpha channel
          ** on the intensity (yet).
          **
          ** NOTE for correct working of this operation for asymmetrical
          ** kernels, the kernel needs to be applied in its reflected form.
          ** That is its values needs to be reversed.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
              if ( result.red == 0.0 ||
                   GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) {
                /* copy the whole pixel - no channel selection */
                *q = k_pixels[u];
                if ( result.red > 0.0 ) changes[id]++;
                result.red = 1.0;
              }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case IterativeDistanceMorphology:
          /* Work out an iterative distance from black edge of a white image
          ** shape.  Essentially white values are decreased to the smallest
          ** 'distance from edge' it can find.
          **
          ** It works by adding kernel values to the neighbourhood, and
          ** select the minimum value found. The kernel is rotated before
          ** use, so kernel distances match resulting distances, when a user
          ** provided asymmetric kernel is applied.
          **
          **
          ** This code is almost identical to True GrayScale Morphology But
          ** not quite.
          **
          ** GreyDilate  Kernel values added, maximum value found Kernel is
          ** rotated before use.
          **
          ** GrayErode:  Kernel values subtracted and minimum value found No
          ** kernel rotation used.
          **
          ** Note the Iterative Distance method is essentially a
          ** GrayErode, but with negative kernel values, and kernel
          ** rotation applied.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          break;

        case UndefinedMorphology:
        default:
          break; /* Do nothing */
      }

      /* Final mathematics of results (combine with original image?)
      **
      ** NOTE: Difference Morphology operators Edge* and *Hat could also
      ** be done here but works better with iteration as a image difference
      ** in the controlling function (below).  Thicken and Thinning however
      ** should be done here so they can be iterated correctly.
      */
      switch ( method ) {
        case HitAndMissMorphology:
        case ErodeMorphology:
          result = min;    /* minimum of neighbourhood */
          break;
        case DilateMorphology:
          result = max;    /* maximum of neighbourhood */
          break;
        case ThinningMorphology:
          /* subtract pattern match from original */
          result.red -= min.red;
          result.green -= min.green;
          result.blue -= min.blue;
          result.opacity -= min.opacity;
          result.index -= min.index;
          break;
        case ThickenMorphology:
          /* Add the pattern matches to the original */
          result.red += min.red;
          result.green += min.green;
          result.blue += min.blue;
          result.opacity += min.opacity;
          result.index += min.index;
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }

      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case UndefinedMorphology:
        case ConvolveMorphology:
        case DilateIntensityMorphology:
        case ErodeIntensityMorphology:
          break; /* full pixel was directly assigned - not a channel method */
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if ((channel & OpacityChannel) != 0
              && image->matte != MagickFalse )
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }

      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
              (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
              (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changes[id]++;
      p++;
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  /* -1 on failure; see NOTE(review) at the column-path return above */
  return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row will
** make use of those values as part of the calculation of the next row.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
/* Returns the number of pixels changed over both passes, or -1 on error.
** Only DistanceMorphology and VoronoiMorphology reach this function
** (the default switch case below asserts on anything else).
*/
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
changed, /* running count of pixels modified (both passes) */
virt_width;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Some methods (including convolve) need to use a reflected kernel.
* Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
/* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! Each row's result feeds the next row. */
/* two views into same image (virtual, and actual) */
virt_view=AcquireVirtualCacheView(image,exception);
auth_view=AcquireAuthenticCacheView(image,exception);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only top half of kernel is processed as we do a single pass downward
** through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* offset to origin in 'p'. while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, while copying the color
** values of the closest pixel.
**
** This is experimental, and really the 'alpha' component should
** be completely separate 'masking' channel so that alpha can
** also be used as part of the results.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
/* NOTE(review): this loop is single-threaded by design; the omp atomic
** below is harmless but vestigial here. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
if (SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
status=MagickFalse;
}
} /* y */
/* Do the reversed (bottom-up, right-to-left) pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the bottom half of the kernel will be processed as we work
** back up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
/* offset to origin in 'p'. while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, copying the closest color.
**
** This is experimental, and really the 'alpha' component should
** be completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
if ( SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
status=MagickFalse;
}
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
/* Returns a new image (caller owns/frees), or NULL on error or when
** iterations == 0.  Ownership of curr/work/save/rslt images is tracked
** carefully; cleanup is centralized at the goto labels at the bottom.
*/
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MaxTextExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THRU */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primitive */
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose != MagickFalse )
(void) (void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morphology methods
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel for the close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem strange to convert a Correlation into a
** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose != MagickFalse ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose != MagickFalse ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) (void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose != MagickFalse && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions,method));
(void) CompositeImageChannel(curr_image,(ChannelType)
(channel & ~SyncChannels),DifferenceCompositeOp,image,0,0);
break;
case EdgeMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions,method));
(void) CompositeImageChannel(curr_image,(ChannelType)
(channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose != MagickFalse ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-bias"
or "-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showKernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  /* Convenience wrapper: apply the morphology to the default channels.
  ** Returns the result of MorphologyImageChannel(), or NULL on error.
  */
  return(MorphologyImageChannel(image,DefaultChannels,method,iterations,
    kernel,exception));
}
MagickExport Image *MorphologyImageChannel(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
  KernelInfo
    *curr_kernel;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  curr_kernel = (KernelInfo *) kernel;
  bias=image->bias;
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      const char
        *artifact;

      /* user-specified output bias, e.g. "-define convolve:bias=50%" */
      artifact = GetImageArtifact(image,"convolve:bias");
      if (artifact != (const char *) NULL)
        bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
      /* user-specified kernel scaling; needs a private writable copy of
      ** the caller's (const) kernel before it can be modified */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if ( curr_kernel == kernel )
          curr_kernel = CloneKernelInfo(kernel);
        if (curr_kernel == (KernelInfo *) NULL)
          return((Image *) NULL);  /* clone failed: nothing to destroy.
            BUGFIX: previously called DestroyKernelInfo(NULL) here, which
            asserts/dereferences a NULL kernel on the failure path. */
        ScaleGeometryKernelInfo(curr_kernel, artifact);
      }
    }
  /* display the (normalized) kernel via stderr */
  if ( IsMagickTrue(GetImageArtifact(image,"showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel"))
    || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) )
    ShowKernelInfo(curr_kernel);
  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { const char
      *artifact;

    compose = UndefinedCompositeOp; /* use default for method */
    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      compose = (CompositeOperator) ParseCommandOption(
        MagickComposeOptions,MagickFalse,artifact);
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image, channel, method, iterations,
    curr_kernel, compose, bias, exception);
  /* Cleanup and Exit - destroy the kernel only if we cloned it */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/* Rotate the given kernel (and every kernel after it in the multi-kernel
** list) counter-clockwise by 'angle' degrees.
**
** The rotation is decomposed into 45, 90 and 180 degree steps:
**   - 45 degrees: only for 3x3 kernels (cyclic shift of the 8 neighbours)
**   - 90 degrees: 1-D kernels (transpose) or square kernels (in-place rotate)
**   - 180 degrees: any kernel (reversal of the value array)
** Rotations that cannot change specific built-in kernels are ignored.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Normalize the angle into the [0, 360) range */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
          ** cyclic shift of the 8 ring values, center stays put */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin around the 3x3 ring */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        /* BUGFIX: was perror(), which appends an unrelated strerror(errno)
        ** message; this is a usage limitation, not a system-call failure. */
        (void) fprintf(stderr,
          "Unable to rotate non-3x3 kernel by 45 degrees\n");
    }

  /* 90 degree rotation step (covers angles in (45,135] mod 180) */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees,
          ** cycling four mirrored positions at a time, in place */
          { register size_t
              i,j,x,y;

            register double
              *k,t;

            k=kernel->values;
            for( i=0, x=kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=kernel->height-1; j<y; j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        /* BUGFIX: was perror() -- see the 45-degree case above. */
        (void) fprintf(stderr,
          "Unable to rotate a non-square, non-linear kernel 90 degrees\n");
    }

  /* 180 degree rotation step */
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origon
       */
      double
        t;

      register double
        *k;

      size_t
        i,
        j;

      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1;  i<j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    args;             /* numeric arguments parsed out of the geometry string */

  GeometryFlags
    flags;            /* which arguments/flags the string actually supplied */

  /* Parse the "-set option:convolve:scale {geometry}" style argument. */
  SetGeometryInfo(&args);
  flags = (GeometryFlags) ParseGeometry(geometry, &args);

  /* A '%' flag means the arguments were given as percentages. */
  if ( (flags & PercentValue) != 0 )
    {
      args.rho   *= 0.01;
      args.sigma *= 0.01;
    }

  /* Supply defaults for any argument the user left out. */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* First argument (plus '!'/'^' flags): scale/normalize the kernel. */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* Second argument: blend a scaled unity kernel into the result. */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/* Scale (and optionally normalize) every kernel in the multi-kernel list.
**
** With NormalizeValue the kernel is first normalized by its value sum (or,
** for zero-summing kernels, by the sum of its positive values); with
** CorrelateNormalizeValue the positive and negative values are normalized
** independently, forcing a zero-summing kernel.  The result is then
** multiplied by 'scaling_factor'.  Cached range/extrema meta-data is kept
** in step, and swapped when a negative scaling factor inverts the kernel.
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register ssize_t
    i;

  register double
    pos_scale,
    neg_scale;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* NaN entries are 'don't care' values and must not be scaled */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if ( ! IsNaN(kernel->values[i]) )
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUGFIX: was "= 1", which dropped the saved
                          ** maximum instead of completing the swap */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel defination to
% standard error, generally due to a users 'showKernel' option request.
%
% The format of the ShowKernelInfo method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Dump a human-readable description of the kernel (and any further kernels
** in its multi-kernel list) to standard error: type, angle, geometry,
** value and output ranges, and the value grid itself. */
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *k;             /* kernel currently being printed */

  size_t
    c, i, u, v;     /* c: kernel index; i: flat value index; u,v: col,row */

  for (c=0, k=kernel;  k != (KernelInfo *) NULL;  c++, k=k->next ) {

    /* Header: only number the kernels when the list holds more than one
    ** (determined from the head kernel's 'next' pointer). */
    (void) FormatLocaleFile(stderr, "Kernel");
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, k->type) );
    if ( fabs(k->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", k->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
          k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), k->minimum,
          GetMagickPrecision(), k->maximum);

    /* Output range summary: classify the kernel by its value sum. */
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), k->negative_range,
          GetMagickPrecision(), k->positive_range);
    if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), k->positive_range+k->negative_range);

    /* Value grid, one row per line; NaN entries print as "nan". */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if ( IsNaN(k->values[i]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y A d d K e r n e l I n f o                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Blend 'scale' times the unity (identity) kernel into every kernel of
  ** the multi-kernel list: bump the value at each kernel's origin, then
  ** refresh that kernel's cached range/extrema meta-data. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* recalculate the meta-data */
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    i,
    count;

  /* Replace every special 'nan' entry with 0.0 in each kernel of the
  ** multi-kernel list (e.g. before handing the kernel to GPU code that
  ** cannot handle 'don't care' values). */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      count = k->width*k->height;
      for (i = 0; i < count; i++)
        if ( IsNaN(k->values[i]) )
          k->values[i] = 0.0;
    }
}
|
pvm-OpenMP-filas.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * Matrix-vector product v2 = m * v1, parallelized across rows with OpenMP.
 * Usage: <prog> N   (N = matrix/vector dimension)
 */
int main(int argc, char **argv) {
    int i, j;
    double start, end, elapsed;

    /* Validate the argument count BEFORE touching argv[1]: the original
     * called atoi(argv[1]) first, dereferencing NULL (and sizing the VLAs
     * from garbage) when no argument was supplied. */
    if (argc < 2) {
        fprintf(stderr, "Faltan argumentos\n");
        exit(-1);
    }

    int N = atoi(argv[1]);
    if (N <= 0) {
        fprintf(stderr, "Tamaño inválido\n");
        exit(-1);
    }

    /* NOTE: VLAs are stack-allocated; a very large N can overflow the stack. */
    int m[N][N];
    int v1[N], v2[N];

    /* Initialize: v1 = [0..N), v2 = 0, m[i][j] = i + j. */
    for (i = 0; i < N; i++) {
        v1[i] = i;
        v2[i] = 0;
        for (j = 0; j < N; j++)
            m[i][j] = i + j;
    }

    start = omp_get_wtime();
    /* Multiply.  j is declared private so each thread has its own
     * inner-loop counter; rows (i) are distributed across threads. */
    #pragma omp parallel for private(j)
    for (i = 0; i < N; ++i)
        for (j = 0; j < N; ++j)
            v2[i] += m[i][j] * v1[j];
    end = omp_get_wtime();
    elapsed = end - start;

    /* Print the result vector and the elapsed time. */
    printf("Vector Resultante\n");
    for (i = 0; i < N; i++)
        printf("v2[%d] = %d\n", i, v2[i]);
    /* BUGFIX: N is a (signed) int, so %d, not %u. */
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%d\n", elapsed, N);
    return 0;
}
|
GB_unop__log2_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fc64_fc64)
// op(A') function: GB (_unop_tran__log2_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog2 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog2 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_clog2 (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply the complex log2 unary operator elementwise.
** Auto-generated code -- only comments added here. */
GrB_Info GB (_unop_apply__log2_fc64_fc64)
(
    GxB_FC64_t *Cx,               /* Cx and Ax may be aliased */
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,    /* A->b if A is bitmap */
    int64_t anz,                  /* number of entries to process */
    int nthreads                  /* OpenMP thread count */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        /* dense/sparse case: every slot of Ax holds a live entry */
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;          /* cast (no-op for FC64 -> FC64) */
            Cx [p] = GB_clog2 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;       /* skip slots with no entry */
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_clog2 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, then apply complex log2 to each entry.
** Auto-generated code -- only comments added here. */
GrB_Info GB (_unop_tran__log2_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     /* per-thread transpose workspaces */
    const int64_t *restrict A_slice,   /* how A is partitioned across threads */
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* the shared transpose template expands via the GB_CAST_OP macro above */
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
constant_density_acoustic_time_scalar_2D_4.h | #ifndef __CDA_TIME_SCALAR_2D_4__
#define __CDA_TIME_SCALAR_2D_4__
#include <stdlib.h>
template< typename T, int ACCURACY >
void cda_time_scalar_2D_4( T* km1_u, int nr_km1_u, int nc_km1_u, // in - padded wavefield shape
T* k_Phix, int nr_k_Phix, int nc_k_Phix, // in - padded wavefield shape
T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz, // in - padded wavefield shape
T* k_u, int nr_k_u, int nc_k_u, // in - padded wavefield shape
T* C, int nr_C, int nc_C, // in - padded wavefield shape
T* rhs, int nr_rhs, int nc_rhs, // in - padded wavefield shape
T* xlpml, int n_xlpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* xrpml, int n_xrpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* zlpml, int n_zlpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* zrpml, int n_zrpml, // in - length is the number of nodes inside the padding that the pml value is defined.
double const& dt, // in
double const& dx, // in
double const& dz, // in
int const& nx, // in
int const& nz, // in
T* kp1_Phix, int nr_kp1_Phix, int nc_kp1_Phix, // out
T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz, // out
T* kp1_u, int nr_kp1_u, int nc_kp1_u ) // out
{
enum {MAX_FD_SHIFT = ACCURACY/2};
T lapU = 0.0;
// PML variable
T sigmax = 0.0;
T sigmaz = 0.0;
// Time delta variables
T dt2 = dt*dt;
// Loop/index variables
int idx;
int zstride = 1;
int xstride = nz;
int s = zstride;
int i, k;
// shared space step square variable
T dx2 = dx*dx;
T dz2 = dz*dz;
// private variables
//non derivatives
T fac1;
T fac2;
//derivatives
T dux , duz;
T dPhix, dPhiz;
char* NUM = getenv("OMP_NUM_THREADS");
int Num_Th = atoi (NUM);
#pragma omp parallel for private(sigmaz, sigmax, i, k, idx, dux, duz, dPhix, dPhiz, lapU, fac1, fac2) shared(dx, dx2, dz, dz2, nz, nx, kp1_Phix, kp1_Phiz, k_Phix, k_Phiz, n_zrpml, n_zlpml, n_xrpml, xrpml, xlpml, zrpml, zlpml, s, rhs, C, dt, dt2, km1_u, k_u, kp1_u) num_threads(Num_Th) collapse(2)
for(i=0; i < nx; ++i)
{
for(k=0; k < nz; k++)
{
idx = i*xstride + k;
kp1_Phix[idx] = 0.0;
kp1_Phiz[idx] = 0.0;
kp1_u[idx] = 0.0;
// This handles homogeneous Dirichlet BCs and non-updating in ghost regions.
if ((i == 0) || (i == nx-1)) continue;
if ((k == 0) || (k == nz-1)) continue;
lapU = 0.0;
// Do the X direction
// Left side
if (i==0)
{
//decentered derivative 2 ranks on the right
dux = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/dx;
dPhix = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*k_Phix[idx+2*nz])/ dx;
lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx2;
}
else if (i == 1)
{
//decentered derivative 1 rank on the right
dux = ((1./12.)*0.0 +(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx;
dPhix = ((1./12.)*0.0+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*k_Phix[idx+2*nz])/ dx;
lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx2;
// Right side
}
else if (i == nx-1)
{
//decentered derivative 2 ranks on the left
dux = ((1./12.)*k_u[idx-2*nz]+(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dx;
dPhix = ((1./12.)*k_Phix[idx-2*nz]+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dx;
lapU += ((-1./12.)*k_u[idx-2*nz]+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dx2;
}
else if (i == nx-2)
{
//decentered derivative 1 ranks on the left
dux = ((1./12.)*k_u[idx-2*nz]+(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*0.0)/ dx;
dPhix = ((1./12.)*k_Phix[idx-2*nz]+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*0.0)/ dx;
lapU += ((-1./12.)*k_u[idx-2*nz]+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*0.0)/ dx2;
}
else
{
//classic centered derivative
dux = ((1./12.)*k_u[idx-2*nz]+(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx;
dPhix = ((1./12.)*k_Phix[idx-2*nz]+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*k_Phix[idx+2*nz])/ dx;
lapU += ((-1./12.)*k_u[idx-2*nz]+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx2;
}
// Do the Z direction
// Left side
if (k==0)
{
//decentered derivative 2 ranks on the right
duz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/dz;
dPhiz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz;
lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2;
}
else if (k == 1)
{
//decentered derivative 1 rank on the right
duz = ((1./12.)*0.0 +(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz;
dPhiz = ((1./12.)*0.0+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz;
lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2;
}
else if (k == nz-1)
{
//decentered derivative 2 ranks on the left
duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dz;
dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dz;
lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dz2;
}
else if (k == nz-2)
{
//decentered derivative 1 ranks on the left
duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dz;
dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*0.0)/ dz;
lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dz2;
}
else
{
//classic centered derivative
duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz;
dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz;
lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2;
}
sigmax = 0.0;
sigmaz = 0.0;
// Check if in left PML-X
if((n_xlpml>0) && (i < n_xlpml))
{
sigmax = xlpml[i];
}
// Check if in right PML-X
else if((n_xrpml>0) && (i >= nx-n_xrpml))
{
sigmax = xrpml[n_xrpml-((nx-1)-i)];
}
// Check if in left PML-Z
if((n_zlpml>0) && (k < n_zlpml))
{
sigmaz = zlpml[k];
}
// Check if in right PML-Z
else if((n_zrpml>0) && (k >= nz-n_zrpml))
{
sigmaz = zrpml[n_zrpml-((nz-1)-k)]; // 0th element of the right pml array corresponds to n_zrpml'th node from the right boundary.
}
if((sigmaz != 0.0) || (sigmax != 0.0))
{
kp1_Phix[idx] = k_Phix[idx] - dt*sigmax*k_Phix[idx] + dt*(sigmaz-sigmax)*dux;
kp1_Phiz[idx] = k_Phiz[idx] - dt*sigmaz*k_Phiz[idx] + dt*(sigmax-sigmaz)*duz;
fac1 = (2.0*dt2 / (2.0 + dt*(sigmax+sigmaz)));
fac2 = (C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiz) - (km1_u[idx]-2.0*k_u[idx])/dt2 + (sigmax+sigmaz)*km1_u[idx]/(2.0*dt) - (sigmax*sigmaz)*k_u[idx];
kp1_u[idx] = fac1 * fac2;
}
else
{
kp1_Phix[idx] = k_Phix[idx];
kp1_Phiz[idx] = k_Phiz[idx];
kp1_u[idx] = dt2*(C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiz) - (km1_u[idx]-2.0*k_u[idx]);
}
}
}
};
// Convenience wrapper: run the OpenMP 2D constant-density acoustic time
// step at 4th-order spatial accuracy (fixes the ACCURACY template argument
// at 4).  All parameters are forwarded unchanged; see cda_time_scalar_2D_4
// for their meaning.
template< typename T>
void cda_time_scalar_2D_OMP_4( T* km1_u,    int nr_km1_u,    int nc_km1_u,     // in
                               T* k_Phix,   int nr_k_Phix,   int nc_k_Phix,    // in
                               T* k_Phiz,   int nr_k_Phiz,   int nc_k_Phiz,    // in
                               T* k_u,      int nr_k_u,      int nc_k_u,       // in
                               T* C,        int nr_C,        int nc_C,         // in
                               T* rhs,      int nr_rhs,      int nc_rhs,       // in
                               T* xlpml,    int n_xlpml,                       // in - PML profile, left  x pad
                               T* xrpml,    int n_xrpml,                       // in - PML profile, right x pad
                               T* zlpml,    int n_zlpml,                       // in - PML profile, left  z pad
                               T* zrpml,    int n_zrpml,                       // in - PML profile, right z pad
                               double const& dt,                               // in
                               double const& dx,                               // in
                               double const& dz,                               // in
                               int const& nx,                                  // in
                               int const& nz,                                  // in
                               T* kp1_Phix, int nr_kp1_Phix, int nc_kp1_Phix,  // out
                               T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz,  // out
                               T* kp1_u,    int nr_kp1_u,    int nc_kp1_u )    // out
{
    cda_time_scalar_2D_4<T,4>( km1_u,    nr_km1_u,    nc_km1_u,
                               k_Phix,   nr_k_Phix,   nc_k_Phix,
                               k_Phiz,   nr_k_Phiz,   nc_k_Phiz,
                               k_u,      nr_k_u,      nc_k_u,
                               C,        nr_C,        nc_C,
                               rhs,      nr_rhs,      nc_rhs,
                               xlpml,    n_xlpml,
                               xrpml,    n_xrpml,
                               zlpml,    n_zlpml,
                               zrpml,    n_zrpml,
                               dt, dx, dz, nx, nz,
                               kp1_Phix, nr_kp1_Phix, nc_kp1_Phix,
                               kp1_Phiz, nr_kp1_Phiz, nc_kp1_Phiz,
                               kp1_u,    nr_kp1_u,    nc_kp1_u );
}
#endif
|
attack_mp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_blas.h>
#include <omp.h>
#include "util.h"
/*
 * Driver for the co-occurrence attack: for each of 10 data rounds, read the
 * background and observed keyword co-occurrence matrices, then run 10
 * randomized attack instances and write each recovered permutation to disk.
 *
 * Usage: attack <obs-prefix> <bg-prefix> <N_doc_bg> <N_kw> <N_obs> <N_iter> <out-prefix>
 */
int main(int argc, char *argv[]) {
    /* Validate the argument count before dereferencing argv[1..7]
     * (the original read them unconditionally). */
    if (argc < 8) {
        fprintf(stderr,
            "Usage: %s input1 input2 N_doc_bg N_kw N_obs N_iter output\n",
            argv[0]);
        return 1;
    }

    char* input_fileName1 = argv[1];
    char* input_fileName2 = argv[2];
    int N_doc_bg = atoi(argv[3]);
    int N_kw = atoi(argv[4]);
    int N_obs = atoi(argv[5]);
    int N_iter = atoi(argv[6]);
    char* output_fileName = argv[7];
    int N_doc = 480000 / 2;   /* half the corpus is the attacked document set */

    int* matrix = (int*) malloc(sizeof(int) * N_obs * N_obs);
    int* matrix_bg = (int*) malloc(sizeof(int) * N_kw * N_kw);
    int* matrix_padded = (int*) malloc(sizeof(int) * N_obs * N_obs);
    int* true_index = (int*) malloc(sizeof(int) * N_kw);
    int* permutation = (int*) malloc(sizeof(int) * N_obs);
    gsl_matrix* matrix_obs;

    for (int round = 0; round < 10; round++)
    {
        char input_fileName1_extend[40];
        char input_fileName2_extend[40];
        /* snprintf: the prefixes come from the command line and could
         * overflow the original sprintf's 40-byte buffers */
        snprintf(input_fileName1_extend, sizeof input_fileName1_extend,
                 "%s%d", input_fileName1, round);
        snprintf(input_fileName2_extend, sizeof input_fileName2_extend,
                 "%s%d", input_fileName2, round);

        // Setup
        struct timeval tv1,tv2;
        gettimeofday(&tv1, NULL);
        read_matrix(&true_index, &matrix_bg, 1.0*N_doc/N_doc_bg, N_kw, input_fileName2_extend);
        read_matrix(&true_index, &matrix, 1.0, N_obs, input_fileName1_extend);
        gettimeofday(&tv2, NULL);
        printf("Reading done: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
        fflush(stdout);

        /* compute the maximum frequency of a keyword
         * (diagonal entries hold per-keyword counts) */
        int freq_max = 0;
        for (int ii = 0; ii < N_kw; ii++)
            if (matrix_bg[ii*N_kw+ii] > freq_max)
                freq_max = matrix_bg[ii*N_kw+ii];
        for (int ii = 0; ii < N_obs; ii++)
            if (matrix[ii*N_obs+ii] > freq_max)
                freq_max = matrix[ii*N_obs+ii];
        printf("Max frequency: %d\n", freq_max);

        for (int iter = 0; iter < 10; iter++)
        {
            printf("Run %d\n", iter);
            matrix_obs = gsl_matrix_alloc(N_obs, N_obs);

            gettimeofday(&tv1, NULL);
            pad_matrix(&matrix_padded, &matrix, N_obs, N_doc, freq_max);
            gettimeofday(&tv2, NULL);
            printf("Padding done: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
            fflush(stdout);

            gettimeofday(&tv1, NULL);
            observe_matrix(matrix_obs, &matrix_padded, N_obs);
            gettimeofday(&tv2, NULL);
            printf("Observed matrix generated: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
            fflush(stdout);

            // Permute observed matrix randomly and attack
            gettimeofday(&tv1, NULL);
            attack(matrix_obs, &matrix_bg, &permutation, N_kw, N_obs, N_doc, freq_max, N_iter);
            gettimeofday(&tv2, NULL);
            /* BUGFIX: tv_sec is time_t; cast to long and print with %ld
             * (the original passed the difference to %d). */
            printf("Main attack done: %ld.\n", (long) (tv2.tv_sec - tv1.tv_sec));
            fflush(stdout);

            char output_fileName_full[40];
            snprintf(output_fileName_full, sizeof output_fileName_full,
                     "%s%d-%d", output_fileName, round, iter);
            print_result(output_fileName_full, &permutation, &true_index, N_obs);
            //sprintf(output_fileName_full, "%s%d-%d-full", output_fileName, round, iter);
            //print_full_result(output_fileName_full, &permutation, &true_index, N_obs);

            /* BUGFIX: a fresh matrix was allocated every iteration but freed
             * only once after both loops, leaking 99 of 100 allocations. */
            gsl_matrix_free(matrix_obs);
        }
    }

    free(matrix);
    free(matrix_bg);
    free(matrix_padded);
    free(true_index);
    free(permutation);
    return(0);
}
/*
 * log_score: log-likelihood that observed keyword pair (idx1, idx2) matches
 * the background pair ((*permutation)[idx1], (*permutation)[idx2]).
 *
 * The observed co-occurrence count is modelled as Gaussian with a mean and
 * variance derived from the background co-occurrence matrix (*matrix)
 * (N_kw x N_kw, row-major), the number of documents N_doc, and the maximum
 * keyword frequency freq_max.
 *
 * Returns 0.0 for the diagonal (idx1 == idx2), -500.0 when the Gaussian pdf
 * underflows to 0, and log(pdf) otherwise.
 */
double log_score(int idx1, int idx2, gsl_matrix* matrix_obs, int** matrix, int** permutation, int N_kw, int N_doc, int freq_max)
{
    if (idx1 == idx2)
        return(0.0);
    int idx1_m = (*permutation)[idx1];
    int idx2_m = (*permutation)[idx2];
    // Background co-occurrence count of the mapped pair (hoisted: it is
    // referenced several times below).
    int cooc = (*matrix)[idx1_m*N_kw + idx2_m];
    // n1/n2: padded marginal counts; diagonal entries hold keyword frequencies.
    int n1 = 2*freq_max - (*matrix)[idx1_m*N_kw + idx1_m];
    int n2 = 2*freq_max - (*matrix)[idx2_m*N_kw + idx2_m];
    double mean = 1.0 * freq_max / N_doc * (n1 + n2);
    double var = 1.0 * cooc / N_doc * (N_doc - cooc);
    var += 1.0 * n1 / N_doc * freq_max * (2 * N_doc - 2*freq_max) / N_doc;
    var += 1.0 * n2 / N_doc * freq_max * (2 * N_doc - 2*freq_max) / N_doc;
    int count_total = (int) gsl_matrix_get(matrix_obs, idx1, idx2);
    mean += cooc;
    // BUG FIX: the original wrote "cooc / N_doc * (N_doc - cooc)" with all-int
    // operands, which truncates to 0 whenever cooc < N_doc; the identical term
    // earlier in this function correctly multiplies by 1.0 first.
    var += 1.0 * cooc / N_doc * (N_doc - cooc);
    double score = gsl_ran_gaussian_pdf(count_total - mean, sqrt(var));
    if (score == 0)
        return(-500.0);   // floor for log of an underflowed pdf
    return(log(score));
}
/*
 * attack: simulated-annealing search for the permutation that best aligns the
 * observed co-occurrence matrix (matrix_obs, N_obs x N_obs) with the
 * background matrix (*matrix, N_kw x N_kw).
 *
 * On return, (*permutation)[i] holds the recovered background index for
 * observed keyword i.  N_iter bounds the number of annealing steps; the loop
 * also exits early after 80000 consecutive rejected moves.
 */
void attack(gsl_matrix* matrix_obs, int** matrix, int** permutation, int N_kw, int N_obs, int N_doc, int freq_max, int N_iter)
{
    // Initialise data structures
    double* score_matrix = (double*) malloc(sizeof(double) * N_obs * N_obs);
    double* score_row1 = (double*) malloc(sizeof(double) * N_obs);
    double* score_row2 = (double*) malloc(sizeof(double) * N_obs);
    int* permutation_tmp = (int*) malloc(sizeof(int) * N_obs);
    // NOTE(review): permutation_inv has N_kw entries but is indexed below by
    // permutation values drawn from [0, N_obs); this assumes N_obs <= N_kw —
    // confirm against the callers.
    int* permutation_inv = (int*) malloc(sizeof(int) * N_kw);
    // Initialise permutations (identity to start)
    for (int ii = 0; ii < N_obs; ii++)
        (*permutation)[ii] = ii;
    for (int ii = 0; ii < N_obs; ii++)
        permutation_tmp[ii] = ii;
    for (int ii = 0; ii < N_kw; ii++)
        permutation_inv[ii] = -1;
    for (int ii = 0; ii < N_obs; ii++)
        permutation_inv[permutation_tmp[ii]] = ii;
    // Initialising RNG
    const gsl_rng_type * T;
    gsl_rng * r;
    gsl_rng_env_setup();
    T = gsl_rng_default;
    r = gsl_rng_alloc (T);
    struct timeval tv1,tv2;
    gettimeofday(&tv1, NULL);
    // Compute initial score for every (row, column) pair
    #pragma omp parallel for shared(score_matrix, matrix_obs, matrix)
    for (int ii = 0; ii < N_obs * N_obs; ii++)
        score_matrix[ii] = log_score((int) (ii / N_obs), ii % N_obs, matrix_obs, matrix, permutation, N_kw, N_doc, freq_max);
    gettimeofday(&tv2, NULL);
    printf("Initial score computed: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec));
    // Iterations of simulated annealing
    double temp = (double) N_kw;
    int N_stuck = 0;
    // BUG FIX: guard against N_iter < 10, which made the progress-report
    // modulus zero (division by zero) in the original code.
    int report_every = (N_iter >= 10) ? (N_iter / 10) : 1;
    for (int iter = 0; iter < N_iter; iter++)
    {
        /* Status report every ~10% of the iterations */
        if (iter % report_every == 0)
        {
            gettimeofday(&tv1, NULL);
            printf("Iteration: %d, %d, %d.\n", iter, N_stuck, (int) (tv1.tv_sec - tv2.tv_sec));
            fflush(stdout);
            gettimeofday(&tv2, NULL);
        }
        // Early exit when stuck too long: forces this iteration to be the
        // last, but lets it run to completion (preserves original behavior).
        // used to be 20k
        if (N_stuck >= 80000)
            iter = N_iter;
        /* Main code: propose a move and re-score the affected rows */
        int idx1, idx2;
        permutation_generation(&idx1, &idx2, &permutation_tmp, permutation, &permutation_inv, N_kw, N_obs);
        int ii = 0;
        #pragma omp parallel for shared(score_row1)
        for (ii = 0; ii < N_obs; ii++)
            score_row1[ii] = log_score(idx1, ii, matrix_obs, matrix, &permutation_tmp, N_kw, N_doc, freq_max);
        if (idx2 >= 0)
        {
            // BUG FIX: braces added — an OpenMP "parallel for" pragma may not
            // be the unbraced statement body of an if.
            #pragma omp parallel for shared(score_row2)
            for (ii = 0; ii < N_obs; ii++)
                score_row2[ii] = log_score(idx2, ii, matrix_obs, matrix, &permutation_tmp, N_kw, N_doc, freq_max);
        }
        double score_diff = 0;
        for (int ii = 0; ii < N_obs; ii++)
            score_diff += score_row1[ii];
        for (int ii = 0; ii < N_obs; ii++)
            score_diff -= score_matrix[idx1*N_obs + ii];
        if (idx2 >= 0)
        {
            for (int ii = 0; ii < N_obs; ii++)
                score_diff += score_row2[ii];
            for (int ii = 0; ii < N_obs; ii++)
                score_diff -= score_matrix[idx2*N_obs + ii];
        }
        // Metropolis acceptance probability, with exponentiation clamped to
        // avoid underflow; > 1.0 means improving moves are always accepted.
        score_diff = score_diff / temp;
        if (score_diff < -40)
            score_diff = 0;
        else if (score_diff > 0)
            score_diff = 1.01;
        else
            score_diff = exp(score_diff);
        if (gsl_ran_flat(r, 0, 1) < score_diff)
        {
            // Accept: update the cached row/column scores
            for (int ii = 0; ii < N_obs; ii++)
                score_matrix[idx1*N_obs + ii] = score_row1[ii];
            for (int ii = 0; ii < N_obs; ii++)
                score_matrix[ii*N_obs + idx1] = score_row1[ii];
            if (idx2 >= 0)
            {
                for (int ii = 0; ii < N_obs; ii++)
                    score_matrix[idx2*N_obs + ii] = score_row2[ii];
                for (int ii = 0; ii < N_obs; ii++)
                    score_matrix[ii*N_obs + idx2] = score_row2[ii];
            }
            // Commit the move to the permutation and its inverse
            permutation_inv[(*permutation)[idx1]] = -1;
            (*permutation)[idx1] = permutation_tmp[idx1];
            permutation_inv[permutation_tmp[idx1]] = idx1;
            if (idx2 >= 0)
            {
                (*permutation)[idx2] = permutation_tmp[idx2];
                permutation_inv[permutation_tmp[idx2]] = idx2;
            }
            N_stuck = 0;
        }
        else
        {
            // Reject: roll the tentative permutation back
            permutation_tmp[idx1] = (*permutation)[idx1];
            if (idx2 >= 0)
                permutation_tmp[idx2] = (*permutation)[idx2];
            N_stuck += 1;
        }
        temp *= 0.995;   // geometric cooling schedule
    }
    free(score_matrix);
    free(score_row1);
    free(score_row2);
    // BUG FIX: these two buffers were leaked in the original code
    free(permutation_tmp);
    free(permutation_inv);
    gsl_rng_free(r);
}
/*
 * print_result: count how many entries of (*permutation) match (*true_index)
 * over N_obs positions, write the count to output_fileName (one line), and
 * echo it to stdout.
 */
void print_result(char* output_fileName, int** permutation, int** true_index, int N_obs)
{
    // Count correctly recovered positions first, so the console summary is
    // printed even if the output file cannot be opened.
    int count = 0;
    for (int ii = 0; ii < N_obs; ii++)
        if ((*permutation)[ii] == (*true_index)[ii])
            count++;
    FILE* fp = fopen(output_fileName, "w");
    if (fp == NULL)
    {
        // BUG FIX: the original dereferenced a NULL stream on open failure.
        fprintf(stderr, "print_result: cannot open %s for writing.\n", output_fileName);
    }
    else
    {
        fprintf(fp, "%d\n", count);
        fclose(fp);
    }
    printf("Success: %d/%d.\n", count, N_obs);
}
/*
 * print_full_result: like print_result, but additionally writes one
 * "recovered,true" pair per line for all N_obs positions after the count.
 */
void print_full_result(char* output_fileName, int** permutation, int** true_index, int N_obs)
{
    int count = 0;
    for (int ii = 0; ii < N_obs; ii++)
        if ((*permutation)[ii] == (*true_index)[ii])
            count++;
    printf("Success: %d/%d.\n", count, N_obs);
    FILE* fp = fopen(output_fileName, "w");
    if (fp == NULL)
    {
        // BUG FIX: the original dereferenced a NULL stream on open failure.
        fprintf(stderr, "print_full_result: cannot open %s for writing.\n", output_fileName);
        return;
    }
    fprintf(fp, "%d\n", count);
    for (int ii = 0; ii < N_obs; ii++)
        fprintf(fp, "%d,%d\n", (*permutation)[ii], (*true_index)[ii]);
    fclose(fp);
}
GB_binop__rminus_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_int8
// A.*B function (eWiseMult): GB_AemultB__rminus_int8
// A*D function (colscale): GB_AxD__rminus_int8
// D*A function (rowscale): GB_DxB__rminus_int8
// C+=B function (dense accum): GB_Cdense_accumB__rminus_int8
// C+=b function (dense accum): GB_Cdense_accumb__rminus_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int8
// C=scalar+B GB_bind1st__rminus_int8
// C=scalar+B' GB_bind1st_tran__rminus_int8
// C=A+scalar GB_bind2nd__rminus_int8
// C=A'+scalar GB_bind2nd_tran__rminus_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y - x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT8 || GxB_NO_RMINUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; body is supplied by the shared
// template.  Note this variant has no GB_DISABLE guard and returns void.
void GB_Cdense_ewise3_accum__rminus_int8
(
GrB_Matrix C,       // input/output matrix (accumulated in place)
const GrB_Matrix A, // first input matrix
const GrB_Matrix B, // second input matrix
const int nthreads  // number of threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// Returns GrB_NO_VALUE when this operator/type combination is disabled.
GrB_Info GB_Cdense_ewise3_noaccum__rminus_int8
(
GrB_Matrix C,       // output matrix (dense)
const GrB_Matrix A, // first input matrix
const GrB_Matrix B, // second input matrix
const int nthreads  // number of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// The *_slice arrays and ntasks describe the parallel task partition
// (presumably produced by GB_ek_slice — see GB_dense_subassign_23_template.c).
GrB_Info GB_Cdense_accumB__rminus_int8
(
GrB_Matrix C,                             // input/output dense matrix
const GrB_Matrix B,                       // sparse matrix to accumulate
const int64_t *GB_RESTRICT kfirst_slice,  // first vector of each task's slice
const int64_t *GB_RESTRICT klast_slice,   // last vector of each task's slice
const int64_t *GB_RESTRICT pstart_slice,  // start offset of each task's slice
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into every entry of the dense matrix C.
GrB_Info GB_Cdense_accumb__rminus_int8
(
GrB_Matrix C,            // input/output dense matrix
const GB_void *p_bwork,  // untyped pointer to the scalar b
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this second return is unreachable (the inner block already
// returned); harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D
// (see GB_AxB_colscale_meta.c for the loop body).
GrB_Info GB_AxD__rminus_int8
(
GrB_Matrix C,                             // output matrix
const GrB_Matrix A, bool A_is_pattern,    // *_is_pattern: pattern-only flags, consumed by the template
const GrB_Matrix D, bool D_is_pattern,    // D: diagonal scaling matrix
const int64_t *GB_RESTRICT kfirst_slice,  // parallel task partition of A
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D
// (see GB_AxB_rowscale_meta.c for the loop body).
GrB_Info GB_DxB__rminus_int8
(
GrB_Matrix C,                           // output matrix
const GrB_Matrix D, bool D_is_pattern,  // D: diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern,  // *_is_pattern: pattern-only flags, consumed by the template
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, with the rminus (y-x) operator.
// The slice workspaces are allocated inside GB_add_template.c and released on
// every exit path via GB_FREE_ALL.
GrB_Info GB_AaddB__rminus_int8
(
GrB_Matrix C,                       // output matrix
const int C_sparsity,               // sparsity format of C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use mask structure only
const bool Mask_comp,               // complement the mask
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,  // C-to-input vector mappings
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with the rminus (y-x) operator.
// The slice workspaces are allocated inside GB_emult_template.c and released
// on every exit path via GB_FREE_ALL.
GrB_Info GB_AemultB__rminus_int8
(
GrB_Matrix C,                       // output matrix
const int C_sparsity,               // sparsity format of C
const GrB_Matrix M,                 // optional mask (may be NULL)
const bool Mask_struct,             // use mask structure only
const bool Mask_comp,               // complement the mask
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,  // C-to-input vector mappings
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]): apply the rminus operator with the scalar bound to
// the first argument, over all entries present in the bitmap Bb.
GrB_Info GB_bind1st__rminus_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t scalar_x = (*((int8_t *) x_input)) ;
int64_t idx ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (idx = 0 ; idx < anz ; idx++)
{
    // only entries present in the bitmap are touched
    if (GBB (Bb, idx))
    {
        int8_t bij = Bx [idx] ;
        Cx [idx] = (bij - scalar_x) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y): apply the rminus operator with the scalar bound to
// the second argument, over all entries present in the bitmap Ab.
GrB_Info GB_bind2nd__rminus_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t scalar_y = (*((int8_t *) y_input)) ;
int64_t idx ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (idx = 0 ; idx < anz ; idx++)
{
    // only entries present in the bitmap are touched
    if (GBB (Ab, idx))
    {
        int8_t aij = Ax [idx] ;
        Cx [idx] = (scalar_y - aij) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
// C = op (x, A'): transpose A and apply the rminus operator with the scalar
// bound to the first argument (the loop body comes from GB_unop_transpose.c,
// via the GB_CAST_OP macro defined just above this function).
GrB_Info GB_bind1st_tran__rminus_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,      // per-workspace buffers for the transpose
const int64_t *GB_RESTRICT A_slice,    // parallel partition of A
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply the rminus operator with the scalar
// bound to the second argument (the loop body comes from GB_unop_transpose.c,
// via the GB_CAST_OP macro defined just above this function).
GrB_Info GB_bind2nd_tran__rminus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,      // per-workspace buffers for the transpose
const int64_t *GB_RESTRICT A_slice,    // parallel partition of A
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
slic.c | /******************************************************************
* SLIC superpixel segmentation (fixed-point arithmetic, optional OpenMP)
*
* Author: Daniele Palossi
* Email: daniele.palossi@iis.ee.ethz.ch
*
******************************************************************/
#include "slic.h"
#ifdef CHECKSUMS
/* Per-stage checksum accumulators, indexed by the CHECKSUM_* blocks below
 * (0: gradient, 1: centers, 2: k-means init; remaining slots unused here). */
int checksums[] = {0,0,0,0,0} ;
#endif
/* NOTE(review): a and b are referenced only from commented-out debug prints
 * inside vl_slic_segment_strided (pixel window [a, b]); they appear to be
 * debug-only globals — confirm before removing. */
int a = 800;
int b = 900;
/*
 * vl_slic_segment_strided: SLIC superpixel segmentation over a horizontal
 * stripe of the image, entirely in fixedpt arithmetic.
 *
 * segmentation : output, one region label per pixel (written as bytes)
 * image        : input pixels, channel-planar layout (x + y*width + k*numPixels)
 * width/height : stripe dimensions in pixels
 * numChannels  : number of image channels
 * regionSize   : nominal superpixel side length
 * regularization, minRegionSize : SLIC parameters (minRegionSize only used by
 *                the disabled stage 4 below)
 * iteration    : caller's stripe/call counter; only gates debug output here
 *
 * Stages: 1) gradient-strength edge map, 2) k-means center initialisation
 * perturbed to the lowest-gradient 3x3 neighbor, 3) k-means iterations with
 * energy-based early termination.  Stage 4 (small-region elimination) is
 * compiled out via #if 0.  masses/centers/edgeMap are globals (declared in
 * slic.h — per-thread slabs of size (2+numChannels)*numRegions each).
 */
void vl_slic_segment_strided(unsigned char * segmentation, // (5*3+1)*63 pixel
unsigned char * image, // (5*2+2)*63 pixel
vl_size width, // 63 pixel
vl_size height, // 5*2+2 pixel
vl_size numChannels, // 1
vl_size regionSize, // 5
vl_size regularization, // 100
vl_size minRegionSize, // 5
vl_size iteration) {
vl_index i, x, y, u, v, k, region ;
vl_uindex iter ;
/* number of superpixel cells per axis: ceil(dim / regionSize) */
vl_size const numRegionsX = (vl_size) fixedpt_toint(fixedpt_ceil(fixedpt_div(fixedpt_fromint(width), fixedpt_fromint(regionSize))));
vl_size const numRegionsY = (vl_size) fixedpt_toint(fixedpt_ceil(fixedpt_div(fixedpt_fromint(height), fixedpt_fromint(regionSize))));
vl_size const numRegions = numRegionsX * numRegionsY ;
vl_size const numPixels = width * height ;
fixedpt previousEnergy = INT_MAX ;
fixedpt startingEnergy ;
vl_size const maxNumIterations = 100 ;
extern int *edgeMap ;
// printf("%d %d\n",numRegionsX,numRegionsY) ;
/* VL_MALLOC allocations */
edgeMap = vl_calloc(numPixels, sizeof(int)) ;
/* one slab per thread; reduced into slab 0 after each k-means pass */
masses = vl_malloc(sizeof(fixedpt) * (2 + numChannels) * numRegions * NUM_THDS) ;
centers = vl_malloc(sizeof(fixedpt) * (2 + numChannels) * numRegions * NUM_THDS) ;
/* ============================== STAGE 1: COMPUTE EDGE MAP (GRADIENT STRENGHT) ==============================*/
/* compute edge map (gradient strength) */
for(k = 0 ; k < (signed)numChannels ; ++k) {
int k_0 = k*numPixels ;
#ifdef OMP
#pragma omp parallel for default (none) \
shared(image,edgeMap) \
private(x) \
firstprivate(height,width,numChannels,k_0) \
num_threads(NUM_THDS)
#endif
for(y = 1 ; y < (signed)height-1 ; ++y) {
int y_0 = y * width ;
int y_1 = (y+1) * width ;
int y_2 = (y-1) * width ;
for(x = 1 ; x < (signed)width-1 ; ++x) {
/* squared central differences in x and y, summed over channels */
unsigned char a = image[(x-1) + y_0 + k_0] ;
unsigned char b = image[(x+1) + y_0 + k_0] ;
unsigned char c = image[x + y_1 + k_0] ;
unsigned char d = image[x + y_2 + k_0] ;
edgeMap[x+y_0] += (a - b) * (a - b) + (c - d) * (c - d) ;
}
}
}
#ifdef LOG_GRADIENT
for(i=0; i<numPixels; i++)
printf("%d\n", edgeMap[i]) ;
#endif
#ifdef CHECKSUM_GRADIENT
for(i=0; i<numPixels; i++)
checksums[0] += edgeMap[i] ;
#endif
/* =================================== STAGE 2: INITIALIZE K-MEANS CENTERS ===================================*/
#ifdef OMP
#pragma omp parallel for default (none) \
shared(edgeMap,image,centers) \
private(i,u,x,y,k) \
firstprivate(numRegionsY,numRegionsX,regionSize,width,height,numChannels) \
num_threads(NUM_THDS)
#endif
for(v = 0; v < (signed)numRegionsY; ++v) {
for(u = 0; u < (signed)numRegionsX; ++u) {
vl_index xp ;
vl_index yp ;
int centerx = 0 ;
int centery = 0 ;
int minEdgeValue = INT_MAX ;
/* nominal center of cell (u, v): regionSize * (u + 0.5), clamped below */
x = fixedpt_toint(fixedpt_round(fixedpt_mul(fixedpt_fromint(regionSize), (fixedpt_fromint(u) + FIXEDPT_ONE_HALF))));
y = fixedpt_toint(fixedpt_round(fixedpt_mul(fixedpt_fromint(regionSize), (fixedpt_fromint(v) + FIXEDPT_ONE_HALF))));
//if(u==0 && v==0) printf("%d %d\n",x,y); //3 3
x = VL_MAX(VL_MIN(x, (signed)width-1),0) ;
y = VL_MAX(VL_MIN(y, (signed)height-1),0) ;
/* search in a 3x3 neighborhood the smallest edge response centered on the region center*/
for(yp = VL_MAX(0, y-1) ; yp <= VL_MIN((signed)height-1, y+1) ; ++ yp) {
for(xp = VL_MAX(0, x-1) ; xp <= VL_MIN((signed)width-1, x+1) ; ++ xp) {
// if(u==1 && v==0) printf("%d %d\n",xp,yp);
int thisEdgeValue = edgeMap[(xp)+(yp)*width];
if (thisEdgeValue < minEdgeValue) {
minEdgeValue = thisEdgeValue ;
centerx = xp ;
centery = yp ;
}
}
}
/* initialize the new center at this location */
/* NOTE(review): the stride 3 here equals (2 + numChannels) only when
 * numChannels == 1 (as in the comments above) — confirm for other
 * channel counts. */
i = (u + v*numRegionsX)*3 ;
centers[i++] = fixedpt_fromint(centerx) ;
centers[i++] = fixedpt_fromint(centery) ;
// printf("%d %d\n",centerx,centery) ;
for (k = 0 ; k < (signed)numChannels ; ++k) {
centers[i++] = fixedpt_fromint((int)image[(centerx)+(centery)*width+(k)*numPixels]);
// printf("%d\n",image[(centerx)+(centery)*width+(k)*numPixels]) ;
}
}
}
#ifdef LOG_CENTERS
for(i=0; i<((2 + numChannels) * numRegions); i++)
fixedpt_print(centers[i]) ;
#endif
#ifdef CHECKSUM_CENTERS
for(i=0; i<((2 + numChannels) * numRegions); i++)
checksums[1] += fixedpt_toint(centers[i]) ;
#endif
/* ===================================== STAGE 3: RUN K-MEANS ITERATIONS =====================================*/
fixedpt one_over_regionSize = fixedpt_div(FIXEDPT_ONE, fixedpt_fromint(regionSize)) ;
fixedpt ten_thousand = fixedpt_frominttoHR(100000) ;
/* u_array/v_array: precomputed cell index (floor(coord/regionSize - 0.5))
 * for every x and y, so the assignment loop avoids per-pixel division */
vl_index *u_array = vl_malloc((width+regionSize)*sizeof(vl_index)) ;
vl_index *v_array = vl_malloc((height+regionSize)*sizeof(vl_index)) ;
fixedpt factor = fixedpt_div(fixedpt_fromint(regularization), fixedpt_fromint(regionSize * regionSize)) ;
int off = (2 + numChannels)*numRegions ;
fixedpt *masses_ptr ;
fixedpt *centers_ptr ;
#ifdef OMP
#pragma omp parallel for default(none) shared(v_array,u_array) private(x) firstprivate(height,width,one_over_regionSize) num_threads(NUM_THDS)
#endif
for (y = 0 ; y < (signed)height ; y++)
v_array[y] = fixedpt_toint(fixedpt_floor(fixedpt_mul(fixedpt_fromint(y),one_over_regionSize) - FIXEDPT_ONE_HALF));
#ifdef OMP
#pragma omp parallel for default(none) shared(v_array,u_array) private(y) firstprivate(height,width,one_over_regionSize) num_threads(NUM_THDS)
#endif
for (x = 0 ; x < (signed)width ; x++)
u_array[x] = fixedpt_toint(fixedpt_floor(fixedpt_mul(fixedpt_fromint(x),one_over_regionSize) - FIXEDPT_ONE_HALF));
#ifdef LOG_KMEANS
#endif
#ifdef CHECKSUM_KMEANS
int checksum_kmeans_u = 0 ;
int checksum_kmeans_v = 0 ;
for(i=0; i<width; i++)
checksum_kmeans_u += u_array[i] ;
for(i=0; i<height; i++)
checksum_kmeans_v += v_array[i] ;
checksums[2] += (checksum_kmeans_v * width)+ (checksum_kmeans_u * height);
#endif
for (iter = 0 ; iter < maxNumIterations ; ++iter) {
/* per-thread energy accumulators, reduced after the assignment pass */
fixedpt *energy_array = vl_calloc(NUM_THDS, sizeof(fixedpt)) ;
fixedpt energy = 0 ;
#ifdef OMP
#pragma omp parallel for default (none) \
shared(u_array,v_array,centers,image,segmentation,energy_array) \
private(x,k) \
firstprivate(height,width,iter,factor,numChannels) \
num_threads(NUM_THDS)
#endif
/* assign pixels to centers */
for (y = 0 ; y < (signed)height ; ++y) {
int thd_id =
#ifdef OMP
omp_get_thread_num() ;
#else
get_core_id() ;
#endif
for (x = 0 ; x < (signed)width ; ++x) {
vl_index up, vp ;
vl_index pixel = x + y * width ;
fixedpt minDistance = INT_MAX ;
vl_index u = u_array[x] ;
vl_index v = v_array[y] ;
// if(iter==0 && iteration==0) printf("%d %d - %d %d\n",x,y,u,v);
/* scan the (up to) 2x2 candidate cells around this pixel */
for (vp = VL_MAX(0, v) ; vp <= VL_MIN((signed)numRegionsY-1, v+1) ; ++vp) {
for (up = VL_MAX(0, u) ; up <= VL_MIN((signed)numRegionsX-1, u+1) ; ++up) {
vl_index region = up + vp * numRegionsX ;
fixedpt centerx = centers[(2 + numChannels) * region + 0] ;
fixedpt centery = centers[(2 + numChannels) * region + 1] ;
// if(iter==0 && iteration==0 && pixel==10) printf("%d %d %d\n",centerx,centery,region) ;
fixedpt spatial = fixedpt_mul((fixedpt_fromint(x) - centerx), (fixedpt_fromint(x) - centerx)) + fixedpt_mul((fixedpt_fromint(y) - centery), (fixedpt_fromint(y) - centery)) ;
fixedpt appearance = 0 ;
fixedpt distance ;
for (k = 0 ; k < (signed)numChannels ; ++k) {
fixedpt centerz = centers[(2 + numChannels) * region + k + 2] ;
fixedpt z = fixedpt_fromint((int)image[(x)+(y)*width+(k)*numPixels]);
appearance += fixedpt_mul((z - centerz), (z - centerz)) ;
}
/* SLIC distance: appearance + (regularization/regionSize^2) * spatial */
distance = appearance + fixedpt_mul(factor, spatial) ;
// if(iter==0 && iteration==0) printf("%d %d\n",pixel,region) ;
// if(iter==0 && iteration==0 && pixel==10) printf("%d\n",distance) ;
if (minDistance > distance) {
minDistance = distance ;
/* NOTE(review): label truncated to 8 bits; assumes numRegions <= 256 — confirm */
segmentation[pixel] = (unsigned char) region ;
}
// if(iter==0 && iteration==0 && pixel>a && pixel<b)
// printf("[%d %d] %d %d %d %d %d\n",pixel,region,vp,up,centerx,centery,distance) ;
}
}
energy_array[thd_id] += fixedpt_to_HR(minDistance) ;
}
}
for(i=0; i<NUM_THDS; i++)
energy += energy_array[i];
/* check energy termination conditions */
if (iter == 0) {
startingEnergy = energy ;
} else {
if ((previousEnergy - energy) < fixedpt_div_HR((startingEnergy - energy), ten_thousand)) {
break ;
}
}
previousEnergy = energy ;
/* recompute centers */
// memset(masses, 0, sizeof(fixedpt) * width * height) ;
// memset(centers, 0, sizeof(fixedpt) * (2 + numChannels) * numRegions) ;
//#pragma omp parallel for default (none) shared(masses) private(i) firstprivate(width,height) num_threads(NUM_THDS)
// for(i=0; i<width * height; i++)
// masses[i] = 0 ;
#ifdef OMP
#pragma omp parallel for default (none) shared(centers,masses) firstprivate(numChannels,numRegions) num_threads(NUM_THDS)
#endif
for(i=0; i<(2 + numChannels)*numRegions*NUM_THDS; i++) {
centers[i] = 0 ;
masses[i] = 0 ;
}
#ifdef OMP
#pragma omp parallel for default (none) \
shared(centers,image,masses,segmentation) \
private(x,k,masses_ptr,centers_ptr) \
firstprivate(height,width,numChannels,off) \
num_threads(NUM_THDS)
#endif
for (y = 0 ; y < (signed)height ; ++y) {
/* each thread accumulates into its own slab (offset off * thread id) */
masses_ptr = masses + off *
#ifdef OMP
omp_get_thread_num() ;
#else
get_core_id() ;
#endif
centers_ptr = centers + off *
#ifdef OMP
omp_get_thread_num() ;
#else
get_core_id() ;
#endif
for (x = 0 ; x < (signed)width ; ++x) {
vl_index pixel = x + y * width ;
vl_index region = segmentation[pixel] ;
masses_ptr[region] += FIXEDPT_ONE ;
// if(iteration==0 && region==1) printf("%d) %d %d\n",iter,x,y) ;
centers_ptr[region * (2 + numChannels) + 0] += x ;
centers_ptr[region * (2 + numChannels) + 1] += y ;
for (k = 0 ; k < (signed)numChannels ; ++k) {
centers_ptr[region * (2 + numChannels) + k + 2] += (int)image[(x)+(y)*width+(k)*numPixels] ;
}
}
}
#ifdef OMP
#pragma omp parallel for default (none) shared(centers,masses) firstprivate(numChannels,numRegions,off) num_threads(NUM_THDS)
#endif
/* reduce the per-thread slabs into slab 0 */
for(i=0; i<(2 + numChannels)*numRegions; i++) {
int j ;
for(j=1; j<NUM_THDS; j++) {
masses[i] += masses[off*j+i] ;
centers[i] += centers[off*j+i] ;
}
}
#ifdef OMP
#pragma omp parallel for default (none) \
shared(centers,masses) \
private(i) \
firstprivate(numRegions,numChannels) \
num_threads(NUM_THDS)
#endif
for (region = 0 ; region < (signed)numRegions ; ++region) {
/* 1-e8, the smallest number in fixedpt is 1 = 0.000244140625 */
fixedpt mass = MAX(masses[region], 1) ;
for ( i = (2 + numChannels) * region ;
i < (signed)(2 + numChannels) * (region + 1) ;
++i) {
centers[i] = fixedpt_div(fixedpt_fromint(centers[i]), mass) ;
}
}
} // close Iter loop
/* debug dump of the first 5 rows of labels on the first call only */
if(iteration==0){
for(y=0; y<5; y++)
for(x=0; x<width; x++)
printf("[%d %d] :: %d\n",x,y,segmentation[y*width+x]);
// if(segmentation[y*width+x]<13) printf("[%d %d] :: %d\n",x,y,segmentation[y*width+x]);
// printf("[%d %d] :: %d\n",x,y,segmentation[y*63+x]);
}
#ifdef LOG_SEGMENTATION
for(i=0; i<numPixels; i++)
printf("%d\n", segmentation[i]) ;
#endif
#ifdef CHECKSUM_SEGMENTATION
int checksum_segmentation = 0 ;
for(i=0; i<numPixels; i++)
checksum_segmentation += segmentation[i] ;
if(checksum_segmentation == CHECKSUM_SEGMENTATION)
printf("CHECKSUM SEGMENTATION:\t\tSUCCESS [%d vs %d]\n", checksum_segmentation, CHECKSUM_SEGMENTATION) ;
else
printf("CHECKSUM SEGMENTATION:\t\tERROR [%d vs %d]\n", checksum_segmentation, CHECKSUM_SEGMENTATION) ;
#endif
vl_free(masses) ;
vl_free(centers) ;
vl_free(edgeMap) ;
/* stage 4 (small-region elimination) is currently compiled out */
#if 0
/* ==================================== STAGE 4: ELIMINATE SMALL REGIONS =====================================*/
unsigned char *cleaned = vl_calloc(numPixels, sizeof(unsigned char)) ;
// vl_uint16 *segment = vl_malloc(sizeof(vl_uint16) * numPixels * 2) ;
vl_uint16 *seg = vl_malloc(sizeof(vl_uint16) * TEMP_SEG_BUFFER * 2 * NUM_THDS) ;
vl_size segmentSize ;
unsigned char label ;
unsigned char cleanedLabel ;
vl_size numExpanded ;
vl_index const dx [] = {+1, -1, 0, 0} ;
vl_index const dy [] = { 0, 0, +1, -1} ;
vl_index direction ;
vl_index pixel ;
//#pragma omp parallel for default (none) \
//shared(segmentation,cleaned,seg) \
//private(x,y,label,numExpanded,segmentSize,pixel,cleanedLabel,direction) \
//firstprivate(height,width,minRegionSize) \
//num_threads(NUM_THDS)
for (y = 0 ; y < (signed)height ; ++y) {
for (x = 0 ; x < (signed)width ; ++x) {
vl_index pixel = x + y * width ;
if (cleaned[pixel]) continue ;
label = segmentation[pixel] ;
numExpanded = 0;
segmentSize = 0;
vl_uint16 *segment = seg + 100 *
#ifdef OMP
omp_get_thread_num() ;
#else
get_core_id() ;
#endif
// segment[segmentSize++] = pixel ;
segment[segmentSize*2] = x ;
segment[segmentSize*2+1] = y ;
segmentSize++ ;
/*
* find cleanedLabel as the label of an already cleaned
* region neighbor of this pixel
*/
cleanedLabel = label + 1 ;
cleaned[pixel] = label + 1 ;
// x = pixel % width ;
// y = pixel / width ;
for (direction = 0 ; direction < 4 ; ++direction) {
vl_index xp = x + dx[direction] ;
vl_index yp = y + dy[direction] ;
vl_index neighbor = xp + yp * width ;
if (0 <= xp &&
xp < (signed)width &&
0 <= yp && yp < (signed)height &&
cleaned[neighbor]) {
cleanedLabel = cleaned[neighbor] ;
}
}
/* expand the segment */
while (numExpanded < segmentSize) {
// vl_size open = segment[numExpanded++] ;
// vl_index x_2 = open % width ;
// vl_index y_2 = open / width ;
vl_index x_2 = segment[numExpanded*2] ;
vl_index y_2 = segment[numExpanded*2+1] ;
numExpanded++ ;
for (direction = 0 ; direction < 4 ; ++direction) {
vl_index xp = x_2 + dx[direction] ;
vl_index yp = y_2 + dy[direction] ;
vl_index neighbor = xp + yp * width ;
if ( 0 <= xp && xp < (signed)width &&
0 <= yp && yp < (signed)height &&
cleaned[neighbor] == 0 &&
segmentation[neighbor] == label) {
cleaned[neighbor] = label + 1 ;
// segment[segmentSize++] = neighbor ;
segment[segmentSize*2] = xp ;
segment[segmentSize*2+1] = yp ;
segmentSize++;
}
}
}
/* change label to cleanedLabel if the segment is too small */
if (segmentSize < minRegionSize) {
while (segmentSize > 0) {
// cleaned[segment[--segmentSize]] = cleanedLabel ;
segmentSize-- ;
vl_index seg_id = segment[segmentSize*2] + segment[segmentSize*2+1]*width ;
cleaned[seg_id] = cleanedLabel ;
}
}
} // close x
} // close y
#ifdef OMP
#pragma omp parallel for default (none) \
shared(segmentation,cleaned) \
firstprivate(numPixels) \
num_threads(NUM_THDS)
#endif
/* restore base 0 indexing of the regions */
for (pixel = 0 ; pixel < (signed)numPixels ; ++pixel){
cleaned[pixel] -- ;
segmentation[pixel] = cleaned[pixel];
}
vl_free(cleaned) ;
vl_free(seg) ;
#ifdef LOG_SEGMENTATION_FINAL
for(i=0; i<numPixels; i++)
printf("%d\n", segmentation[i]) ;
#endif
#ifdef CHECKSUM_SEGMENTATION_FINAL
int checksum_segmentation_final = 0 ;
for(i=0; i<numPixels; i++)
checksum_segmentation_final += segmentation[i] ;
if(checksum_segmentation_final == CHECKSUM_SEGMENTATION_FINAL)
printf("CHECKSUM FINAL SEGMENTATION:\tSUCCESS [%d vs %d]\n", checksum_segmentation_final, CHECKSUM_SEGMENTATION_FINAL) ;
else
printf("CHECKSUM FINAL SEGMENTATION:\tERROR [%d vs %d]\n", checksum_segmentation_final, CHECKSUM_SEGMENTATION_FINAL) ;
#endif
#endif
}
/*
 * SLIC superpixel segmentation (fixed-point implementation).
 *
 * segmentation  : output - one region label per pixel (width * height entries)
 * image         : input  - planar image data, numChannels planes of width*height bytes
 * width, height : image extent in pixels
 * numChannels   : number of image planes
 * regionSize    : nominal superpixel grid spacing in pixels
 * regularization: spatial regularization strength of the k-means distance
 * minRegionSize : connected regions smaller than this are merged into a
 *                 neighboring region in the final cleanup stage
 *
 * Stage 1 computes a gradient-strength edge map, stage 2 places the initial
 * k-means centers at the lowest-gradient spot of each grid cell, stage 3 runs
 * the iterative k-means assignment/update, stage 4 eliminates small regions.
 *
 * FIX: the per-iteration energy reduction buffer and the u/v lookup arrays
 * were previously leaked; they are now freed.
 */
void vl_slic_segment(unsigned char * segmentation,
                     unsigned char * image,
                     vl_size width,
                     vl_size height,
                     vl_size numChannels,
                     vl_size regionSize,
                     vl_size regularization,
                     vl_size minRegionSize) {

    vl_index i, x, y, u, v, k, region ;
    vl_uindex iter ;
    vl_size const numRegionsX = (vl_size) fixedpt_toint(fixedpt_ceil(fixedpt_div(fixedpt_fromint(width), fixedpt_fromint(regionSize))));
    vl_size const numRegionsY = (vl_size) fixedpt_toint(fixedpt_ceil(fixedpt_div(fixedpt_fromint(height), fixedpt_fromint(regionSize))));
    vl_size const numRegions = numRegionsX * numRegionsY ;
    vl_size const numPixels = width * height ;
    fixedpt *centers ;
    int *edgeMap ;
    fixedpt previousEnergy = INT_MAX ;
    fixedpt startingEnergy ;
    fixedpt *masses ;
    vl_size const maxNumIterations = 100 ;

    /* masses/centers carry NUM_THDS private copies for per-thread accumulation */
    edgeMap = vl_calloc(numPixels, sizeof(int)) ;
    masses = vl_malloc(sizeof(fixedpt) * (2 + numChannels) * numRegions * NUM_THDS) ;
    centers = vl_malloc(sizeof(fixedpt) * (2 + numChannels) * numRegions * NUM_THDS) ;

    /* ============================== STAGE 1: COMPUTE EDGE MAP (GRADIENT STRENGHT) ==============================*/
#ifdef PROFILING_LIB
    reset_timer();
    start_timer();
#endif
    /* compute edge map (gradient strength): squared central differences in x and y,
       summed over all channels; the one-pixel border is left at zero */
    for(k = 0 ; k < (signed)numChannels ; ++k) {
        int k_0 = k*numPixels ;
#ifdef OMP
#pragma omp parallel for default (none) \
        shared(image,edgeMap) \
        private(x) \
        firstprivate(height,width,numChannels,k_0) \
        num_threads(NUM_THDS)
#endif
        for(y = 1 ; y < (signed)height-1 ; ++y) {
            int y_0 = y * width ;
            int y_1 = (y+1) * width ;
            int y_2 = (y-1) * width ;
            for(x = 1 ; x < (signed)width-1 ; ++x) {
                unsigned char a = image[(x-1) + y_0 + k_0] ;
                unsigned char b = image[(x+1) + y_0 + k_0] ;
                unsigned char c = image[x + y_1 + k_0] ;
                unsigned char d = image[x + y_2 + k_0] ;
                edgeMap[x+y_0] += (a - b) * (a - b) + (c - d) * (c - d) ;
            }
        }
    }
#ifdef PROFILING_LIB
    stop_timer();
    printf("STAGE 1:\t%d\n",get_time());
#endif
#ifdef LOG_GRADIENT
    for(i=0; i<1000; i++) // <numPixels
        printf("%d\n", edgeMap[i]) ;
#endif
#ifdef CHECKSUM_GRADIENT
    int checksum_gradient = 0 ;
    for(i=0; i<numPixels; i++)
        checksum_gradient += edgeMap[i] ;
    if(checksum_gradient == CHECKSUM_GRADIENT)
        printf("CHECKSUM GRADIENT:\t\tSUCCESS [%d vs %d]\n", checksum_gradient, CHECKSUM_GRADIENT) ;
    else
        printf("CHECKSUM GRADIENT:\t\tERROR [%d vs %d]\n", checksum_gradient, CHECKSUM_GRADIENT) ;
#endif
    /* =================================== STAGE 2: INITIALIZE K-MEANS CENTERS ===================================*/
#ifdef PROFILING_LIB
    reset_timer();
    start_timer();
#endif
#ifdef OMP
#pragma omp parallel for default (none) \
    shared(edgeMap,image,centers) \
    private(i,u,x,y,k) \
    firstprivate(numRegionsY,numRegionsX,regionSize,width,height,numChannels) \
    num_threads(NUM_THDS)
#endif
    for(v = 0; v < (signed)numRegionsY; ++v) {
        for(u = 0; u < (signed)numRegionsX; ++u) {
            vl_index xp ;
            vl_index yp ;
            int centerx = 0 ;
            int centery = 0 ;
            int minEdgeValue = INT_MAX ;
            /* nominal center of grid cell (u,v), clamped to the image */
            x = fixedpt_toint(fixedpt_round(fixedpt_mul(fixedpt_fromint(regionSize), (fixedpt_fromint(u) + FIXEDPT_ONE_HALF))));
            y = fixedpt_toint(fixedpt_round(fixedpt_mul(fixedpt_fromint(regionSize), (fixedpt_fromint(v) + FIXEDPT_ONE_HALF))));
            x = VL_MAX(VL_MIN(x, (signed)width-1),0) ;
            y = VL_MAX(VL_MIN(y, (signed)height-1),0) ;
            /* search in a 3x3 neighborhood the smallest edge response centered on the region center*/
            for(yp = VL_MAX(0, y-1) ; yp <= VL_MIN((signed)height-1, y+1) ; ++ yp) {
                for(xp = VL_MAX(0, x-1) ; xp <= VL_MIN((signed)width-1, x+1) ; ++ xp) {
                    int thisEdgeValue = edgeMap[(xp)+(yp)*width];
                    if (thisEdgeValue < minEdgeValue) {
                        minEdgeValue = thisEdgeValue ;
                        centerx = xp ;
                        centery = yp ;
                    }
                }
            }
            /* initialize the new center at this location: (x, y, channels...) */
            i = (u + v*numRegionsX)*3 ;
            centers[i++] = fixedpt_fromint(centerx) ;
            centers[i++] = fixedpt_fromint(centery) ;
            for (k = 0 ; k < (signed)numChannels ; ++k) {
                centers[i++] = fixedpt_fromint((int)image[(centerx)+(centery)*width+(k)*numPixels]);
            }
        }
    }
#ifdef PROFILING_LIB
    stop_timer();
    printf("STAGE 2:\t%d\n",get_time());
#endif
#ifdef LOG_CENTERS
    for(i=0; i<((2 + numChannels) * numRegions); i++)
        fixedpt_print(centers[i]) ;
#endif
#ifdef CHECKSUM_CENTERS
    int checksum_centers = 0 ;
    for(i=0; i<((2 + numChannels) * numRegions); i++)
        checksum_centers += fixedpt_toint(centers[i]) ;
    if(checksum_centers == CHECKSUM_CENTERS)
        printf("CHECKSUM CENTERS:\t\tSUCCESS [%d vs %d]\n", checksum_centers, CHECKSUM_CENTERS) ;
    else
        printf("CHECKSUM CENTERS:\t\tERROR [%d vs %d]\n", checksum_centers, CHECKSUM_CENTERS) ;
#endif
    /* ===================================== STAGE 3: RUN K-MEANS ITERATIONS =====================================*/
#ifdef PROFILING_LIB
    reset_timer();
    start_timer();
#endif
    fixedpt one_over_regionSize = fixedpt_div(FIXEDPT_ONE, fixedpt_fromint(regionSize)) ;
    /* convergence denominator: stop when the improvement drops below
       (startingEnergy - energy) / 1e5 */
    fixedpt ten_thousand = fixedpt_frominttoHR(100000) ;
    vl_index *u_array = vl_malloc((width+regionSize)*sizeof(vl_index)) ;
    vl_index *v_array = vl_malloc((height+regionSize)*sizeof(vl_index)) ;
    fixedpt factor = fixedpt_div(fixedpt_fromint(regularization), fixedpt_fromint(regionSize * regionSize)) ;
    int off = (2 + numChannels)*numRegions ;
    fixedpt *masses_ptr ;
    fixedpt *centers_ptr ;
    /* precompute, per row/column, the grid coordinate of the nearest center */
#ifdef OMP
#pragma omp parallel for default(none) shared(v_array,u_array) private(x) firstprivate(height,width,one_over_regionSize) num_threads(NUM_THDS)
#endif
    for (y = 0 ; y < (signed)height ; y++)
        v_array[y] = fixedpt_toint(fixedpt_floor(fixedpt_mul(fixedpt_fromint(y),one_over_regionSize) - FIXEDPT_ONE_HALF));
#ifdef OMP
#pragma omp parallel for default(none) shared(v_array,u_array) private(y) firstprivate(height,width,one_over_regionSize) num_threads(NUM_THDS)
#endif
    for (x = 0 ; x < (signed)width ; x++)
        u_array[x] = fixedpt_toint(fixedpt_floor(fixedpt_mul(fixedpt_fromint(x),one_over_regionSize) - FIXEDPT_ONE_HALF));
#ifdef LOG_KMEANS
#endif
#ifdef CHECKSUM_KMEANS
    int checksum_kmeans_u = 0 ;
    int checksum_kmeans_v = 0 ;
    for(i=0; i<height; i++)
        checksum_kmeans_v += v_array[i] ;
    for(i=0; i<width; i++)
        checksum_kmeans_u += u_array[i] ;
    int check = (checksum_kmeans_v * width)+ (checksum_kmeans_u * height);
    if(check == CHECKSUM_KMEANS)
        printf("CHECKSUM K-MEANS:\t\tSUCCESS [%d vs %d]\n", check, CHECKSUM_KMEANS) ;
    else
        printf("CHECKSUM K-MEANS:\t\tERROR [%d vs %d]\n", check, CHECKSUM_KMEANS) ;
#endif
    for (iter = 0 ; iter < maxNumIterations ; ++iter) {
        fixedpt *energy_array = vl_calloc(NUM_THDS, sizeof(fixedpt)) ;
        fixedpt energy = 0 ;
#ifdef OMP
#pragma omp parallel for default (none) \
        shared(u_array,v_array,centers,image,segmentation,energy_array) \
        private(x,k) \
        firstprivate(height,width,iter,factor,numChannels) \
        num_threads(NUM_THDS)
#endif
        /* assign pixels to centers */
        for (y = 0 ; y < (signed)height ; ++y) {
            int thd_id =
#ifdef OMP
            omp_get_thread_num() ;
#else
            get_core_id() ;
#endif
            for (x = 0 ; x < (signed)width ; ++x) {
                vl_index up, vp ;
                vl_index pixel = x + y * width ;
                fixedpt minDistance = INT_MAX ;
                vl_index u = u_array[x] ;
                vl_index v = v_array[y] ;
                /* only the 2x2 block of centers around (u,v) can be nearest */
                for (vp = VL_MAX(0, v) ; vp <= VL_MIN((signed)numRegionsY-1, v+1) ; ++vp) {
                    for (up = VL_MAX(0, u) ; up <= VL_MIN((signed)numRegionsX-1, u+1) ; ++up) {
                        vl_index region = up + vp * numRegionsX ;
                        fixedpt centerx = centers[(2 + numChannels) * region + 0] ;
                        fixedpt centery = centers[(2 + numChannels) * region + 1] ;
                        fixedpt spatial = fixedpt_mul((fixedpt_fromint(x) - centerx), (fixedpt_fromint(x) - centerx)) + fixedpt_mul((fixedpt_fromint(y) - centery), (fixedpt_fromint(y) - centery)) ;
                        fixedpt appearance = 0 ;
                        fixedpt distance ;
                        for (k = 0 ; k < (signed)numChannels ; ++k) {
                            fixedpt centerz = centers[(2 + numChannels) * region + k + 2] ;
                            fixedpt z = fixedpt_fromint((int)image[(x)+(y)*width+(k)*numPixels]);
                            appearance += fixedpt_mul((z - centerz), (z - centerz)) ;
                        }
                        distance = appearance + fixedpt_mul(factor, spatial) ;
                        if (minDistance > distance) {
                            minDistance = distance ;
                            segmentation[pixel] = (unsigned char) region ;
                        }
                    }
                }
                energy_array[thd_id] += fixedpt_to_HR(minDistance) ;
            }
        }
        for(i=0; i<NUM_THDS; i++)
            energy += energy_array[i];
        vl_free(energy_array) ;  /* FIX: was allocated every iteration and never freed */
        /* check energy termination conditions */
        if (iter == 0) {
            startingEnergy = energy ;
        } else {
            if ((previousEnergy - energy) < fixedpt_div_HR((startingEnergy - energy), ten_thousand)) {
                break ;
            }
        }
        previousEnergy = energy ;
        /* recompute centers: clear all per-thread copies, accumulate, reduce */
#ifdef OMP
#pragma omp parallel for default (none) shared(centers,masses) firstprivate(numChannels,numRegions) num_threads(NUM_THDS)
#endif
        for(i=0; i<(2 + numChannels)*numRegions*NUM_THDS; i++) {
            centers[i] = 0 ;
            masses[i] = 0 ;
        }
#ifdef OMP
#pragma omp parallel for default (none) \
        shared(centers,image,masses,segmentation) \
        private(x,k,masses_ptr,centers_ptr) \
        firstprivate(height,width,numChannels,off) \
        num_threads(NUM_THDS)
#endif
        for (y = 0 ; y < (signed)height ; ++y) {
            masses_ptr = masses + off *
#ifdef OMP
            omp_get_thread_num() ;
#else
            get_core_id() ;
#endif
            centers_ptr = centers + off *
#ifdef OMP
            omp_get_thread_num() ;
#else
            get_core_id() ;
#endif
            for (x = 0 ; x < (signed)width ; ++x) {
                vl_index pixel = x + y * width ;
                vl_index region = segmentation[pixel] ;
                masses_ptr[region] += FIXEDPT_ONE ;
                centers_ptr[region * (2 + numChannels) + 0] += x ;
                centers_ptr[region * (2 + numChannels) + 1] += y ;
                for (k = 0 ; k < (signed)numChannels ; ++k) {
                    centers_ptr[region * (2 + numChannels) + k + 2] += (int)image[(x)+(y)*width+(k)*numPixels] ;
                }
            }
        }
        /* reduce the per-thread copies into the first copy */
#ifdef OMP
#pragma omp parallel for default (none) shared(centers,masses) firstprivate(numChannels,numRegions,off) num_threads(NUM_THDS)
#endif
        for(i=0; i<(2 + numChannels)*numRegions; i++) {
            int j ;
            for(j=1; j<NUM_THDS; j++) {
                masses[i] += masses[off*j+i] ;
                centers[i] += centers[off*j+i] ;
            }
        }
#ifdef OMP
#pragma omp parallel for default (none) \
        shared(centers,masses) \
        private(i) \
        firstprivate(numRegions,numChannels) \
        num_threads(NUM_THDS)
#endif
        for (region = 0 ; region < (signed)numRegions ; ++region) {
            /* 1-e8, the smallest number in fixedpt is 1 = 0.000244140625 */
            fixedpt mass = MAX(masses[region], 1) ;
            for ( i = (2 + numChannels) * region ;
                  i < (signed)(2 + numChannels) * (region + 1) ;
                  ++i) {
                centers[i] = fixedpt_div(fixedpt_fromint(centers[i]), mass) ;
            }
        }
    } // close Iter loop
#ifdef LOG_SEGMENTATION
    for(y=0; y<5; y++)
        for(x=0; x<width; x++){
            printf("[%d %d] :: %d\n",x,y,segmentation[y*width+x]);
        }
#endif
#ifdef PROFILING_LIB
    stop_timer();
    printf("STAGE 3:\t%d\n",get_time());
#endif
#ifdef LOG_SEGMENTATION
    for(i=0; i<numPixels; i++)
        printf("%d\n", segmentation[i]) ;
#endif
#ifdef CHECKSUM_SEGMENTATION
    int checksum_segmentation = 0 ;
    for(i=0; i<numPixels; i++)
        checksum_segmentation += segmentation[i] ;
    if(checksum_segmentation == CHECKSUM_SEGMENTATION)
        printf("CHECKSUM SEGMENTATION:\t\tSUCCESS [%d vs %d]\n", checksum_segmentation, CHECKSUM_SEGMENTATION) ;
    else
        printf("CHECKSUM SEGMENTATION:\t\tERROR [%d vs %d]\n", checksum_segmentation, CHECKSUM_SEGMENTATION) ;
#endif
    vl_free(u_array) ;  /* FIX: previously leaked */
    vl_free(v_array) ;  /* FIX: previously leaked */
    vl_free(masses) ;
    vl_free(centers) ;
    vl_free(edgeMap) ;
    /* ==================================== STAGE 4: ELIMINATE SMALL REGIONS =====================================*/
#ifdef PROFILING_LIB
    reset_timer();
    start_timer();
#endif
    unsigned char *cleaned = vl_calloc(numPixels, sizeof(unsigned char)) ;
    vl_uint16 *seg = vl_malloc(sizeof(vl_uint16) * TEMP_SEG_BUFFER * 2 * NUM_THDS) ;
    vl_size segmentSize ;
    unsigned char label ;
    unsigned char cleanedLabel ;
    vl_size numExpanded ;
    vl_index const dx [] = {+1, -1, 0, 0} ;
    vl_index const dy [] = { 0, 0, +1, -1} ;
    vl_index direction ;
    vl_index pixel ;
    for (y = 0 ; y < (signed)height ; ++y) {
        for (x = 0 ; x < (signed)width ; ++x) {
            vl_index pixel = x + y * width ;
            if (cleaned[pixel]) continue ;
            label = segmentation[pixel] ;
            numExpanded = 0;
            segmentSize = 0;
            /* NOTE(review): the per-thread stride of 100 does not obviously match
               the TEMP_SEG_BUFFER * 2 elements allocated per thread above --
               confirm TEMP_SEG_BUFFER before parallelizing this loop. */
            vl_uint16 *segment = seg + 100 *
#ifdef OMP
            omp_get_thread_num() ;
#else
            get_core_id() ;
#endif
            /* segment stores (x,y) pairs of the flood-filled region */
            segment[segmentSize*2] = x ;
            segment[segmentSize*2+1] = y ;
            segmentSize++ ;
            /*
             * find cleanedLabel as the label of an already cleaned
             * region neighbor of this pixel
             */
            cleanedLabel = label + 1 ;
            cleaned[pixel] = label + 1 ;
            for (direction = 0 ; direction < 4 ; ++direction) {
                vl_index xp = x + dx[direction] ;
                vl_index yp = y + dy[direction] ;
                vl_index neighbor = xp + yp * width ;
                if (0 <= xp &&
                    xp < (signed)width &&
                    0 <= yp && yp < (signed)height &&
                    cleaned[neighbor]) {
                    cleanedLabel = cleaned[neighbor] ;
                }
            }
            /* expand the segment */
            while (numExpanded < segmentSize) {
                vl_index x_2 = segment[numExpanded*2] ;
                vl_index y_2 = segment[numExpanded*2+1] ;
                numExpanded++ ;
                for (direction = 0 ; direction < 4 ; ++direction) {
                    vl_index xp = x_2 + dx[direction] ;
                    vl_index yp = y_2 + dy[direction] ;
                    vl_index neighbor = xp + yp * width ;
                    if ( 0 <= xp && xp < (signed)width &&
                         0 <= yp && yp < (signed)height &&
                         cleaned[neighbor] == 0 &&
                         segmentation[neighbor] == label) {
                        cleaned[neighbor] = label + 1 ;
                        segment[segmentSize*2] = xp ;
                        segment[segmentSize*2+1] = yp ;
                        segmentSize++;
                    }
                }
            }
            /* change label to cleanedLabel if the segment is too small */
            if (segmentSize < minRegionSize) {
                while (segmentSize > 0) {
                    segmentSize-- ;
                    vl_index seg_id = segment[segmentSize*2] + segment[segmentSize*2+1]*width ;
                    cleaned[seg_id] = cleanedLabel ;
                }
            }
        } // close x
    } // close y
#ifdef OMP
#pragma omp parallel for default (none) \
    shared(segmentation,cleaned) \
    firstprivate(numPixels) \
    num_threads(NUM_THDS)
#endif
    /* restore base 0 indexing of the regions */
    for (pixel = 0 ; pixel < (signed)numPixels ; ++pixel){
        cleaned[pixel] -- ;
        segmentation[pixel] = cleaned[pixel];
    }
#ifdef PROFILING_LIB
    stop_timer();
    printf("STAGE 4:\t%d\n",get_time());
#endif
    vl_free(cleaned) ;
    vl_free(seg) ;
#ifdef LOG_SEGMENTATION_FINAL
    for(i=0; i<numPixels; i++)
        printf("%d\n", segmentation[i]) ;
#endif
#ifdef CHECKSUM_SEGMENTATION_FINAL
    int checksum_segmentation_final = 0 ;
    for(i=0; i<numPixels; i++)
        checksum_segmentation_final += segmentation[i] ;
    if(checksum_segmentation_final == CHECKSUM_SEGMENTATION_FINAL)
        printf("CHECKSUM FINAL SEGMENTATION:\tSUCCESS [%d vs %d]\n", checksum_segmentation_final, CHECKSUM_SEGMENTATION_FINAL) ;
    else
        printf("CHECKSUM FINAL SEGMENTATION:\tERROR [%d vs %d]\n", checksum_segmentation_final, CHECKSUM_SEGMENTATION_FINAL) ;
#endif
}
|
calib.c | /* Copyright 2013-2016. The Regents of the University of California.
* Copyright 2016. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2016 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2013 Dara Bahri <dbahri123@gmail.com>
* 2015-2016 Siddharth Iyer <sid8795@gmail.com>
*
*
* Uecker M, Lai P, Murphy MJ, Virtue P, Elad M, Pauly JM, Vasanawala SS, Lustig M.
* ESPIRiT - An Eigenvalue Approach to Autocalibrating Parallel MRI: Where SENSE
* meets GRAPPA. Magn Reson Med, 71:990-1001 (2014)
*
* Iyer S, Ong F, Lustig M.
* Towards A Parameter Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation.
* Presented in the session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
* http://www.ismrm.org/16/program_files/O86.htm
*
*/
#include <assert.h>
#include <complex.h>
#include <math.h>
#include <stdbool.h>
#include "num/multind.h"
#include "num/fft.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/lapack.h"
#include "num/casorati.h"
#include "num/rand.h"
#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/resize.h"
#include "misc/debug.h"
#include "misc/utils.h"
#include "calib/calmat.h"
#include "calib/cc.h"
#include "calib/softweight.h"
#include "calib.h"
#ifdef USE_CUDA
#include "calib/calibcu.h"
#endif
#if 0
#define CALMAT_SVD
#endif
#if 0
#define FLIP
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/* Complete the Hermitian matrix from its stored lower triangle, then compute
 * the M leading eigenpairs by orthogonal iteration. On return, the first M
 * rows of 'matrix' hold the eigenvectors and 'val' the eigenvalues.
 * (Ordering might differ from herm2.) */
static void eigen_herm3(int M, int N, float val[M], complex float matrix[N][N])
{
	complex float vecs[M][N];

	/* fill the upper triangle with the conjugates of the lower triangle */
	for (int r = 0; r < N; r++)
		for (int c = 0; c < r; c++)
			matrix[c][r] = conj(matrix[r][c]);

	orthiter(M, N, 30, val, vecs, matrix);

	/* copy the iterated eigenvectors back into the caller's matrix */
	for (int r = 0; r < M; r++)
		for (int c = 0; c < N; c++)
			matrix[r][c] = vecs[r][c];
}
static float scurve(float x)
{
if (x <= -1.)
return 0.;
if (x >= 1.)
return 1.;
return 0.5 * (1. + 2. * x / (1. + powf(x, 2.)));
}
/* Soft crop weight: maps the eigenvalue magnitude through an s-curve so the
 * weight ramps smoothly from 0 (at the threshold) to 1. */
static float crop_weight_function(float crth, float val)
{
	float t = (sqrtf(val) - crth) / (1. - crth);

	return scurve(t);
}
/* Hard crop weight: 0 at or below the threshold, 1 above it. */
static float crop_thresh_function(float crth, float val)
{
	if (val <= crth)
		return 0.;

	return 1.;
}
typedef float (*weight_function)(float crth, float val);
/* Scale each map value in 'ptr' by fun(crth, |eigenvalue|), where the
 * eigenvalue map 'map' has one entry per image point and map index.
 * dims: [x, y, z, channels, maps]; any dims beyond the 5th must be singleton. */
static void crop_weight(int N, const long dims[N], complex float* ptr, weight_function fun, float crth, const complex float* map)
{
	// FIX: validate N before dims[0..4] are read -- the original read them
	// first, which is an out-of-bounds access whenever N < 5.
	assert(N >= 5);
	assert(1 == md_calc_size(N - 5, dims + 5));

	long xx = dims[0];
	long yy = dims[1];
	long zz = dims[2];
	long cc = dims[3];
	long mm = dims[4];

	for (long m = 0; m < mm; m++) {
#pragma omp parallel for
		for (long k = 0; k < zz; k++) {
			for (long i = 0; i < yy; i++) {
				for (long j = 0; j < xx; j++) {

					// eigenvalue magnitude at this image point
					float val = cabsf(map[((m * zz + k) * yy + i) * xx + j]);

					// apply the same weight to all channels
					for (long c = 0; c < cc; c++)
						ptr[(((m * cc + c) * zz + k) * yy + i) * xx + j] *= fun(crth, val);
				}
			}
		}
	}
}
void crop_sens(const long dims[DIMS], complex float* ptr, bool soft, float crth, const complex float* map)
{
crop_weight(DIMS, dims, ptr, soft ? crop_weight_function : crop_thresh_function, crth, map);
}
/**
* sure_crop - This determines the crop-threshold to use as described in the talk: "Towards A Parameter
* Free ESPIRiT: Soft-Weighting For Robust Coil Sensitivity Estimation". This was given at the
* session: "New Frontiers In Image Reconstruction" at ISMRM 2016.
*
* Parameters:
* var - Estimated variance in data.
* evec_dims - The eigenvector dimensions.
* evec_data - The eigenvectors.
* eptr - The eigenvalues.
* calreg_dims - Dimension of the calibration region.
* calreg - Calibration data.
*/
/* Sweep the crop threshold c and pick the one minimizing a SURE estimate of
 * the MSE between the low-resolution image and its projection onto the
 * thresholded eigenvector subspace. Returns the selected threshold. */
static float sure_crop(float var, const long evec_dims[DIMS], complex float* evec_data, complex float* eptr, const long calreg_dims[DIMS], const complex float* calreg)
{
	assert(1 == md_calc_size(DIMS - 5, evec_dims + 5));
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

	long num_maps = evec_dims[4];

	// Construct low-resolution image
	long im_dims[5];
	md_select_dims(5, 15, im_dims, evec_dims);	// 15 = 0b01111: keep dims 0-3

	complex float* im = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);
	md_clear(5, im_dims, im, CFL_SIZE);
	md_resize_center(5, im_dims, im, calreg_dims, calreg, CFL_SIZE);
	ifftuc(5, im_dims, FFT_FLAGS, im, im);

	// Temporary vector for crop dimensions
	long cropdims[5];
	md_select_dims(5, 15, cropdims, calreg_dims);
	cropdims[4] = num_maps;

	// Eigenvectors (M)
	complex float* M = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);
	md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

	// Temporary eigenvector holder to hold low resolution maps
	complex float* LM = md_alloc_sameplace(5, evec_dims, CFL_SIZE, calreg);

	// Temporary holder for projection calreg
	complex float* TC = md_alloc_sameplace(5, calreg_dims, CFL_SIZE, calreg);

	// Temporary holder to hold low resolution calib maps
	complex float* CM = md_alloc_sameplace(5, cropdims, CFL_SIZE, calreg);

	// Eigenvalues (W)
	long W_dims[5];
	md_select_dims(5, 23, W_dims, evec_dims);	// 23 = 0b10111: keep dims 0-2 and 4

	complex float* W = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);
	md_copy(5, W_dims, W, eptr, CFL_SIZE);

	// Place holder for the inner product result
	complex float* ip = md_alloc_sameplace(5, W_dims, CFL_SIZE, calreg);

	// Place holder for the projection result
	complex float* proj = md_alloc_sameplace(5, im_dims, CFL_SIZE, calreg);

	// Place holder for divergence term
	long div_dims[5] = MD_INIT_ARRAY(5, 1);
	complex float* div = md_alloc_sameplace(5, div_dims, CFL_SIZE, calreg);

	// Calculating strides.
	long str1_ip[5];
	long str2_ip[5];
	long stro_ip[5];

	md_calc_strides(5, str1_ip, im_dims, CFL_SIZE);
	md_calc_strides(5, str2_ip, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_ip, W_dims, CFL_SIZE);

	long str1_proj[5];
	long str2_proj[5];
	long stro_proj[5];

	md_calc_strides(5, str1_proj, W_dims, CFL_SIZE);
	md_calc_strides(5, str2_proj, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_proj, im_dims, CFL_SIZE);

	long str1_div[5];
	long str2_div[5];
	long stro_div[5];

	md_calc_strides(5, str1_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, str2_div, evec_dims, CFL_SIZE);
	md_calc_strides(5, stro_div, div_dims, CFL_SIZE);

	long tdims_ip[5];
	long tdims_proj[5];

	// Broadcast-compatible loop dimensions for the fmac operations.
	for (int i = 0; i < 5; i++) {

		assert((im_dims[i] == evec_dims[i]) || (1 == im_dims[i]) || (1 == evec_dims[i]));
		assert((W_dims[i] == evec_dims[i]) || (1 == W_dims[i]) || (1 == evec_dims[i]));

		tdims_ip[i] = (1 == im_dims[i]) ? evec_dims[i] : im_dims[i];
		tdims_proj[i] = (1 == W_dims[i]) ? evec_dims[i] : W_dims[i];
	}

	// Starting parameter sweep with SURE.
	// Bisection-like search: step c by s while the estimated MSE improves,
	// then back up and halve (and flip) the step until |s| is small.
	float mse = -1.;
	float old_mse = 0.;
	float s = -0.1;
	float c = 0.99;

	long ctr1 = 0;
	long ctr2 = 0;

	debug_printf(DP_INFO, "---------------------------------------------\n");
	debug_printf(DP_INFO, "| CTR1 | CTR2 | Crop | Est. MSE |\n");
	debug_printf(DP_INFO, "---------------------------------------------\n");

	while (fabs(s) > 1.E-4) {

		ctr1++;

		while ( (c < 0.999)
			&& (c > 0.001)
			&& ( (ctr2 <= 1)
			|| (mse < old_mse))) {

			ctr2++;

			md_clear(5, W_dims, ip, CFL_SIZE);
			md_clear(5, im_dims, proj, CFL_SIZE);
			md_clear(5, div_dims, div, CFL_SIZE);
			md_clear(5, evec_dims, M, CFL_SIZE);
			md_clear(5, evec_dims, LM, CFL_SIZE);
			md_clear(5, calreg_dims, TC, CFL_SIZE);

			md_copy(5, evec_dims, M, evec_data, CFL_SIZE);

			old_mse = mse;
			mse = 0.;

			// Threshold the eigenvectors at the current c.
			crop_weight(5, evec_dims, M, crop_thresh_function, c, W);

			md_zfmacc2(5, tdims_ip, stro_ip, ip, str1_ip, im, str2_ip, M); // Projection.
			md_zfmac2(5, tdims_proj, stro_proj, proj, str1_proj, ip, str2_proj, M);

			fftuc(5, im_dims, FFT_FLAGS, proj, proj); // Low res proj img.

			md_resize_center(5, calreg_dims, TC, im_dims, proj, CFL_SIZE);
			md_resize_center(5, im_dims, proj, calreg_dims, TC, CFL_SIZE);

			ifftuc(5, im_dims, FFT_FLAGS, proj, proj);

			// Data-fidelity part of the SURE estimate.
			for (long jdx = 0; jdx < md_calc_size(5, im_dims); jdx++)
				mse += powf(cabsf(im[jdx] - proj[jdx]), 2.);

			fftuc(5, evec_dims, FFT_FLAGS, LM, M); // low-res maps .

			md_resize_center(5, cropdims, CM, evec_dims, LM, CFL_SIZE);
			md_resize_center(5, evec_dims, LM, cropdims, CM, CFL_SIZE);

			ifftuc(5, evec_dims, FFT_FLAGS, LM, LM);

			md_zfmacc2(5, evec_dims, stro_div, div, str1_div, LM, str2_div, LM); // Calc SURE div using low res maps.

			mse += 2. * var * crealf(*div);

			if (ctr2 == 1)
				debug_printf(DP_INFO, "| %4ld | %4ld | %0.4f | %0.12e |\n", ctr1, ctr2, c, mse);
			else
				debug_printf(DP_INFO, "| | %4ld | %0.4f | %0.12e |\n", ctr2, c, mse);

			c = c + s;
		}

		// Undo the overshooting step, halve and reverse the step size.
		c -= s;
		ctr2 = 0;
		s = -s / 2;
		c += s;
	}

	c = c + s;

	debug_printf(DP_INFO, "---------------------------------------------\n");

	md_free(im);
	md_free(TC);
	md_free(CM);
	md_free(M);
	md_free(LM);
	md_free(W);
	md_free(ip);
	md_free(proj);
	md_free(div);

	debug_printf(DP_DEBUG1, "Calculated c: %.4f\n", c);

	return c;
}
/* First calibration step: compute the null-space kernels from the calibration
 * region, then accumulate them into the image-domain covariance 'imgcov'.
 * 'svals' receives up to SN singular values of the calibration matrix. */
void calone(const struct ecalib_conf* conf, const long cov_dims[4], complex float* imgcov, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

#if 1
	// Default path: null-space kernels from the calibration matrix SVD.
	long nskerns_dims[5];
	complex float* nskerns;
	compute_kernels(conf, nskerns_dims, &nskerns, SN, svals, calreg_dims, data);
#else
	// Disabled alternative: SPIRiT kernel calibration.
	long channels = calreg_dims[3];
	long kx = conf->kdims[0];
	long ky = conf->kdims[1];
	long kz = conf->kdims[2];

	long nskerns_dims[5] = { kx, ky, kz, channels, 0 };
	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);

	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);

	long nr_kernels = channels;
	nskerns_dims[4] = channels;

	spirit_kernel(nskerns_dims, nskerns, calreg_dims, data);
#endif

	compute_imgcov(cov_dims, imgcov, nskerns_dims, nskerns);

	md_free(nskerns);
}
/* calculate point-wise maps
*
*/
/* Point-wise eigen-decomposition of the image-domain channel covariance.
 * optr receives the eigenvector maps (out_dims[4] maps of out_dims[3]
 * channels), eptr (optional, may be NULL) the corresponding eigenvalues.
 * imgcov2 holds the packed (upper-triangular) covariance per image point;
 * msk (optional) restricts computation to selected image points. */
void eigenmaps(const long out_dims[DIMS], complex float* optr, complex float* eptr, const complex float* imgcov2, const long msk_dims[3], const bool* msk, bool orthiter, bool ecal_usegpu)
{
#ifdef USE_CUDA
	if (ecal_usegpu) {

		//FIXME cuda version should be able to return sensitivities for a subset of image-space points
		assert(!msk);
		eigenmapscu(out_dims, optr, eptr, imgcov2);
		return;
	}
#else
	// Without CUDA support, requesting the GPU path is a programming error.
	assert(!ecal_usegpu);
#endif

	long channels = out_dims[3];
	long maps = out_dims[4];

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(maps <= channels);

	long xx = out_dims[0];
	long yy = out_dims[1];
	long zz = out_dims[2];

	// NOTE(review): scale is always 1. here; original comment was truncated
	// ("for some reason, not ...") -- presumably a disabled normalization.
	float scale = 1.;

	if (msk_dims) {

		assert(msk_dims[0] == xx);
		assert(msk_dims[1] == yy);
		assert(msk_dims[2] == zz);
	}

	md_clear(5, out_dims, optr, CFL_SIZE);

#pragma omp parallel for collapse(3)
	for (long k = 0; k < zz; k++) {
		for (long j = 0; j < yy; j++) {
			for (long i = 0; i < xx; i++) {

				if (!msk || msk[i + xx * (j + yy * k)]) {

					float val[channels];
					complex float cov[channels][channels];
					complex float tmp[channels * (channels + 1) / 2];

					// Gather the packed covariance for this image point.
					for (long l = 0; l < channels * (channels + 1) / 2; l++)
						tmp[l] = imgcov2[((l * zz + k) * yy + j) * xx + i] / scale;

					unpack_tri_matrix(channels, cov, tmp);

					if (orthiter)
						eigen_herm3(maps, channels, val, cov);
					else
						lapack_eig(channels, val, cov);

					// Store the leading eigenpairs; both solvers return
					// results in ascending order, so index from the end.
					for (long u = 0; u < maps; u++) {

						long ru = (orthiter ? maps : channels) - 1 - u;

						for (long v = 0; v < channels; v++)
							optr[((((u * channels + v) * zz + k) * yy + j) * xx + i)] = cov[ru][v];

						if (NULL != eptr)
							eptr[((u * zz + k) * yy + j) * xx + i] = val[ru];
					}
				}
			}
		}
	}
}
/* Second calibration step: interpolate the image-domain covariance up to the
 * requested output size, then run the point-wise eigen-decomposition to
 * obtain the sensitivity maps (and, optionally, the eigenvalue maps). */
void caltwo(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* emaps, const long in_dims[4], complex float* in_data, const long msk_dims[3], const bool* msk)
{
	long channels = out_dims[3];
	long cosize = channels * (channels + 1) / 2;

	assert(DIMS >= 5);
	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(in_dims[3] == cosize);

	long cov_dims[4] = { in_dims[0], in_dims[1], in_dims[2], cosize };
	long covbig_dims[4] = { out_dims[0], out_dims[1], out_dims[2], cosize };

	// Each spatial output dim must be at least the input dim (or both 1),
	// and each non-singleton input dim must be even for sinc interpolation.
	for (int d = 0; d < 3; d++) {

		assert(((covbig_dims[d] == 1) && (cov_dims[d] == 1)) || (covbig_dims[d] >= cov_dims[d]));
		assert((1 == cov_dims[d]) || (0 == cov_dims[d] % 2));
	}

	complex float* imgcov2 = md_alloc(4, covbig_dims, CFL_SIZE);

	debug_printf(DP_DEBUG1, "Resize...\n");

	sinc_zeropad(4, covbig_dims, imgcov2, cov_dims, in_data);

	debug_printf(DP_DEBUG1, "Point-wise eigen-decomposition...\n");

	eigenmaps(out_dims, out_data, emaps, imgcov2, msk_dims, msk, conf->orthiter, conf->usegpu);

	md_free(imgcov2);
}
/* Compute the dimensions of the image-domain covariance: twice the kernel
 * extent along each non-singleton spatial axis, and the packed-triangular
 * channel covariance size along dimension 3. */
void calone_dims(const struct ecalib_conf* conf, long cov_dims[4], long channels)
{
	for (int d = 0; d < 3; d++) {

		long kd = conf->kdims[d];

		cov_dims[d] = (1 == kd) ? 1 : (2 * kd);
	}

	cov_dims[3] = channels * (channels + 1) / 2;
}
// Default ESPIRiT calibration configuration: 6x6x6 kernel; remaining field
// meanings follow the declaration order of struct ecalib_conf in the header
// (threshold 0.001, numsv/percentsv disabled at -1, crop 0.8, ...).
// NOTE(review): field order not visible here -- verify against calib.h.
const struct ecalib_conf ecalib_defaults = { { 6, 6, 6 }, 0.001, -1, -1., false, false, 0.8, true, false, -1., false, true, -1., false };
/* Full ESPIRiT calibration: kernel/covariance estimation (calone), point-wise
 * eigen-decomposition (caltwo), optional intensity normalization, crop
 * threshold selection (fixed or SURE-based), cropping and phase fixing.
 * out_data receives the sensitivity maps, eptr the eigenvalue maps. */
void calib2(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data, const long msk_dims[3], const bool* msk)
{
	long channels = calreg_dims[3];
	long maps = out_dims[4];

	assert(calreg_dims[3] == out_dims[3]);
	assert(maps <= channels);

	assert(1 == md_calc_size(DIMS - 5, out_dims + 5));
	assert(1 == md_calc_size(DIMS - 5, calreg_dims + 5));

	complex float rot[channels][channels];

	if (conf->rotphase) {

		// rotate the phase with respect to the first principle component
		long scc_dims[DIMS] = MD_INIT_ARRAY(DIMS, 1);
		scc_dims[COIL_DIM] = channels;
		scc_dims[MAPS_DIM] = channels;
		scc(scc_dims, &rot[0][0], calreg_dims, data);

	} else {

		// identity: no phase rotation
		for (int i = 0; i < channels; i++)
			for (int j = 0; j < channels; j++)
				rot[i][j] = (i == j) ? 1. : 0.;
	}

	long cov_dims[4];

	calone_dims(conf, cov_dims, channels);

	complex float* imgcov = md_alloc(4, cov_dims, CFL_SIZE);

	calone(conf, cov_dims, imgcov, SN, svals, calreg_dims, data);
	caltwo(conf, out_dims, out_data, eptr, cov_dims, imgcov, msk_dims, msk);

	/* Intensity and phase normalization similar as proposed
	 * for adaptive combine (Walsh's method) in
	 * Griswold et al., ISMRM 10:2410 (2002)
	 */

	if (conf->intensity) {

		debug_printf(DP_DEBUG1, "Normalize...\n");

		/* I think the reason this works is because inhomogeneity usually
		 * comes from only a few coil elements which are close. The l1-norm
		 * is more resilient against such outliers. -- Martin
		 */

		normalizel1(DIMS, COIL_FLAG, out_dims, out_data);
		md_zsmul(DIMS, out_dims, out_data, out_data, sqrtf((float)channels));
	}

	// Negative conf->crop requests automatic (SURE-based) threshold selection.
	float c = (conf->crop >= 0.) ? conf->crop : sure_crop(conf->var, out_dims, out_data, eptr, calreg_dims, data);

	debug_printf(DP_DEBUG1, "Crop maps... (c = %.2f)\n", c);

	crop_sens(out_dims, out_data, conf->softcrop, c, eptr);

	debug_printf(DP_DEBUG1, "Fix phase...\n");

	fixphase2(DIMS, out_dims, COIL_DIM, rot[0], out_data, out_data);

	md_free(imgcov);
}
/*
 * calib - Convenience wrapper around calib2 with no mask
 * (passes NULL for msk_dims and msk).
 */
void calib(const struct ecalib_conf* conf, const long out_dims[DIMS], complex float* out_data, complex float* eptr, int SN, float svals[SN], const long calreg_dims[DIMS], const complex float* data)
{
	calib2(conf, out_dims, out_data, eptr, SN, svals, calreg_dims, data, NULL, NULL);
}
/*
 * perturb - Add random noise of magnitude 'amt' to each column of
 * the (dims[0] x dims[1]) matrix 'vecs', then re-normalize every
 * column to unit l2-norm.
 */
static void perturb(const long dims[2], complex float* vecs, float amt)
{
	// Draw complex Gaussian noise of the same shape as vecs.
	complex float* rnd = md_alloc(2, dims, CFL_SIZE);
	md_gaussian_rand(2, dims, rnd);

	// Scale each noise column to l2-norm 'amt' and add it to the
	// corresponding column of vecs.
	for (long col = 0; col < dims[1]; col++) {

		complex float* nc = rnd + col * dims[0];
		md_zsmul(1, dims, nc, nc, amt / md_znorm(1, dims, nc));
	}

	md_zadd(2, dims, vecs, vecs, rnd);

	// Restore unit norm for every perturbed column.
	for (long col = 0; col < dims[1]; col++) {

		complex float* vc = vecs + col * dims[0];
		md_zsmul(1, dims, vc, vc, 1. / md_znorm(1, dims, vc));
	}

	md_free(rnd);
}
/*
 * number_of_kernels - Decide how many kernels to keep, based on the
 * singular values of the calibration matrix.
 *
 * Exactly one selection criterion must be active in conf:
 *   - numsv:     explicit kernel count;
 *   - percentsv: percentage of N;
 *   - threshold: relative singular-value cutoff.
 * The asserts enforce that the other two are disabled (-1).
 *
 * N   - total number of singular values
 * val - singular values in descending order (val[0] is the largest)
 *
 * Returns n with 0 <= n <= N; aborts via error() when there is no signal.
 */
static int number_of_kernels(const struct ecalib_conf* conf, int N, const float val[N])
{
	// FIX: check for signal before any criterion divides by val[0]
	// (the original checked only after the threshold loop had already
	// divided by a possibly non-positive val[0]).
	if (val[0] <= 0.)
		error("No signal.\n");

	int n = 0;

	if (-1 != conf->numsv) {

		n = conf->numsv;
		assert(-1. == conf->percentsv);
		assert(-1. == conf->threshold);

	} else if (conf->percentsv != -1.) {

		n = N * conf->percentsv / 100.;
		assert(-1 == conf->numsv);
		assert(-1. == conf->threshold);

	} else {

		assert(-1 == conf->numsv);
		assert(-1. == conf->percentsv);

		// Count singular values above the relative cutoff.
		for (int i = 0; i < N; i++)
			if (val[i] / val[0] > sqrtf(conf->threshold))
				n++;
	}

	// FIX: n and N are both int, so both use %d (the original passed
	// int N for a %ld specifier — a format/argument type mismatch,
	// which is undefined behavior in printf-style functions).
	debug_printf(DP_DEBUG1, "Using %d/%d kernels (%.2f%%, last SV: %f%s).\n", n, N, (float)n / (float)N * 100., (n > 0) ? (val[n - 1] / val[0]) : 1., conf->weighting ? ", weighted" : "");

	// Trace of the squared singular values (debug only).
	float tr = 0.;

	for (int i = 0; i < N; i++) {

		tr += powf(val[i], 2.);
		debug_printf(DP_DEBUG3, "SVALS %f (%f)\n", val[i], val[i] / val[0]);
	}

	debug_printf(DP_DEBUG3, "\nTRACE: %f (%f)\n", tr, tr / (float)N);

	assert(n <= N);
	return n;
}
/*
 * compute_kernels - Build the calibration matrix from the calibration
 * region and compute its singular vectors ("kernels").
 *
 * Outputs:
 *   nskerns_dims[5] - { kx, ky, kz, channels, kernels }; entry [4] is
 *                     set to the number of kernels selected by
 *                     number_of_kernels (or its complement under FLIP).
 *   *nskerns_ptr    - newly allocated kernel array; caller frees.
 *   val[SN]         - singular values; SN must equal kx*ky*kz*channels.
 *
 * Two compile-time variants:
 *   - CALMAT_SVD: direct SVD of the calibration matrix;
 *   - default:    eigendecomposition of the covariance matrix, with
 *                 eigenvalues reversed (to descending order) and
 *                 square-rooted to obtain singular values.
 * Under FLIP the complementary (flipped) set of vectors is stored —
 * note the mirrored indexing in the copy loops below.
 */
void compute_kernels(const struct ecalib_conf* conf, long nskerns_dims[5], complex float** nskerns_ptr, int SN, float val[SN], const long caldims[DIMS], const complex float* caldata)
{
	// Calibration data must have no extent beyond the first five dims.
	assert(1 == md_calc_size(DIMS - 5, caldims + 5));

	nskerns_dims[0] = conf->kdims[0];
	nskerns_dims[1] = conf->kdims[1];
	nskerns_dims[2] = conf->kdims[2];
	nskerns_dims[3] = caldims[3];

	// Total kernel vector length (and kernel count before selection).
	long N = md_calc_size(4, nskerns_dims);

	assert(N > 0);

	nskerns_dims[4] = N;

	complex float* nskerns = md_alloc(5, nskerns_dims, CFL_SIZE);
	*nskerns_ptr = nskerns;

	// N x N workspace for the singular/eigen vectors.
	PTR_ALLOC(complex float[N][N], vec);

	assert(NULL != val);
	assert(SN == N);

	debug_printf(DP_DEBUG1, "Build calibration matrix and SVD...\n");

#ifdef CALMAT_SVD
	calmat_svd(conf->kdims, N, *vec, val, caldims, caldata);

	if (conf->weighting)
		soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val);

	// Copy (transposed) right singular vectors into the kernel array,
	// optionally weighted by the (soft-weighted) singular values.
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
#ifndef FLIP
			nskerns[i * N + j] = ((*vec)[j][i]) * (conf->weighting ? val[i] : 1.);
#else
			nskerns[i * N + j] = ((*vec)[j][N - 1 - i]) * (conf->weighting ? val[N - 1 - i] : 1.);
#endif
#else
	covariance_function(conf->kdims, N, *vec, caldims, caldata);

	debug_printf(DP_DEBUG1, "Eigen decomposition... (size: %ld)\n", N);

	// we could apply Nystroem method here to speed it up
	float tmp_val[N];
	lapack_eig(N, tmp_val, *vec);

	// reverse and square root, test for smaller null to avoid NaNs
	// (eigenvalues come back in ascending order; clamp tiny negatives)
	for (int i = 0; i < N; i++)
		val[i] = (tmp_val[N - 1 - i] < 0.) ? 0. : sqrtf(tmp_val[N - 1 - i]);

	if (conf->weighting)
		soft_weight_singular_vectors(N, conf->var, conf->kdims, caldims, val, val);

	// Copy eigenvectors (reversed to match descending val order),
	// optionally weighted.
	for (int i = 0; i < N; i++)
		for (int j = 0; j < N; j++)
#ifndef FLIP
			nskerns[i * N + j] = (*vec)[N - 1 - i][j] * (conf->weighting ? val[i] : 1.); // flip
#else
			nskerns[i * N + j] = (*vec)[i][j] * (conf->weighting ? val[N - 1 - i] : 1.); // flip
#endif
#endif

	// Optional random perturbation of the kernels (unit-norm preserved).
	if (conf->perturb > 0.) {

		long dims[2] = { N, N };
		perturb(dims, nskerns, conf->perturb);
	}

	// Select how many kernels to keep; under FLIP keep the complement.
#ifndef FLIP
	nskerns_dims[4] = number_of_kernels(conf, N, val);
#else
	nskerns_dims[4] = N - number_of_kernels(conf, N, val);
#endif

	PTR_FREE(vec);
}
/*
 * compute_imgcov - Transform the kernels to image space and compute,
 * for every voxel, the packed Gram matrix of the per-channel kernel
 * vectors.
 *
 * cov_dims[4]     - output dims { xh, yh, zh, channels*(channels+1)/2 }
 * imgcov          - output; one packed (lower-triangular) Gram matrix
 *                   of 'cosize' entries per voxel
 * nskerns_dims[5] - kernel dims { kx, ky, kz, channels, nr_kernels }
 * nskerns         - kernels as produced by compute_kernels
 */
void compute_imgcov(const long cov_dims[4], complex float* imgcov, const long nskerns_dims[5], const complex float* nskerns)
{
	debug_printf(DP_DEBUG1, "Zeropad...\n");

	long xh = cov_dims[0];
	long yh = cov_dims[1];
	long zh = cov_dims[2];

	long kx = nskerns_dims[0];
	long ky = nskerns_dims[1];
	long kz = nskerns_dims[2];

	long channels = nskerns_dims[3];
	long nr_kernels = nskerns_dims[4];

	long imgkern_dims[5] = { xh, yh, zh, channels, nr_kernels };

	complex float* imgkern1 = md_alloc(5, imgkern_dims, CFL_SIZE);
	complex float* imgkern2 = md_alloc(5, imgkern_dims, CFL_SIZE);

	// Zero-pad the (kx, ky, kz) kernels to the full (xh, yh, zh) grid.
	md_resize_center(5, imgkern_dims, imgkern1, nskerns_dims, nskerns, CFL_SIZE);

	// resort array

	debug_printf(DP_DEBUG1, "FFT (juggling)...\n");

	long istr[5];
	long mstr[5];

	long idim[5] = { xh, yh, zh, channels, nr_kernels };
	long mdim[5] = { nr_kernels, channels, xh, yh, zh };

	md_calc_strides(5, istr, idim, CFL_SIZE);
	md_calc_strides(5, mstr, mdim, CFL_SIZE);

	// Output strides permute the memory layout so that each voxel's
	// (channels x nr_kernels) matrix is contiguous (kernel fastest) —
	// matching the pointer arithmetic in the Gram loop below.
	long m2str[5] = { mstr[2], mstr[3], mstr[4], mstr[1], mstr[0] };

	ifftmod(5, imgkern_dims, FFT_FLAGS, imgkern1, imgkern1);
	ifft2(5, imgkern_dims, FFT_FLAGS, m2str, imgkern2, istr, imgkern1);

	// Normalization: kernel size times FFT scaling (see second factor).
	float scalesq = (kx * ky * kz) * (xh * yh * zh); // second part for FFT scaling

	md_free(imgkern1);

	debug_printf(DP_DEBUG1, "Calculate Gram matrix...\n");

	int cosize = channels * (channels + 1) / 2;

	assert(cov_dims[3] == cosize);

	#pragma omp parallel for collapse(3)
	for (int k = 0; k < zh; k++) {
		for (int j = 0; j < yh; j++) {
			for (int i = 0; i < xh; i++) {

				complex float gram[cosize];
				gram_matrix2(channels, gram, nr_kernels, (const complex float (*)[nr_kernels])(imgkern2 + ((k * yh + j) * xh + i) * (channels * nr_kernels)));
#ifdef FLIP
				// add (scaled) identity matrix
				// FIX: loop variables renamed to a/b — the original
				// reused i and j here, shadowing the spatial indices
				// of the enclosing loops (-Wshadow hazard; behavior
				// unchanged since the shadowing was block-scoped).
				for (int a = 0, l = 0; a < channels; a++)
					for (int b = 0; b <= a; b++, l++)
						gram[l] = ((a == b) ? (kx * ky * kz) : 0.) - gram[l];
#endif
				// Scatter the packed Gram entries into the output
				// (entry index l is the slowest-varying dimension).
				for (int l = 0; l < cosize; l++)
					imgcov[(((l * zh) + k) * yh + j) * xh + i] = gram[l] / scalesq;
			}
		}
	}

	md_free(imgkern2);
}
|
convolution_7x7_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv7x7s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
const float* r6 = img0.row(6);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
for (; j+7<outw; j+=8)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%2], #64 \n"// r1
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4s, v11.4s}, [%2] \n"
"fmla v16.4s, v24.4s, v6.s[0] \n"
"fmla v17.4s, v24.4s, v6.s[2] \n"
"fmla v18.4s, v24.4s, v7.s[0] \n"
"fmla v19.4s, v24.4s, v7.s[2] \n"
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v8.s[2] \n"
"fmla v22.4s, v24.4s, v9.s[0] \n"
"fmla v23.4s, v24.4s, v9.s[2] \n"
"fmla v16.4s, v25.4s, v6.s[1] \n"
"fmla v17.4s, v25.4s, v6.s[3] \n"
"fmla v18.4s, v25.4s, v7.s[1] \n"
"fmla v19.4s, v25.4s, v7.s[3] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v8.s[3] \n"
"fmla v22.4s, v25.4s, v9.s[1] \n"
"fmla v23.4s, v25.4s, v9.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v6.s[2] \n"
"fmla v17.4s, v26.4s, v7.s[0] \n"
"fmla v18.4s, v26.4s, v7.s[2] \n"
"fmla v19.4s, v26.4s, v8.s[0] \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v9.s[0] \n"
"fmla v22.4s, v26.4s, v9.s[2] \n"
"fmla v23.4s, v26.4s, v10.s[0] \n"
"fmla v16.4s, v27.4s, v6.s[3] \n"
"fmla v17.4s, v27.4s, v7.s[1] \n"
"fmla v18.4s, v27.4s, v7.s[3] \n"
"fmla v19.4s, v27.4s, v8.s[1] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v9.s[1] \n"
"fmla v22.4s, v27.4s, v9.s[3] \n"
"fmla v23.4s, v27.4s, v10.s[1] \n"
"fmla v16.4s, v28.4s, v7.s[0] \n"
"fmla v17.4s, v28.4s, v7.s[2] \n"
"fmla v18.4s, v28.4s, v8.s[0] \n"
"fmla v19.4s, v28.4s, v8.s[2] \n"
"fmla v20.4s, v28.4s, v9.s[0] \n"
"fmla v21.4s, v28.4s, v9.s[2] \n"
"fmla v22.4s, v28.4s, v10.s[0] \n"
"fmla v23.4s, v28.4s, v10.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v7.s[1] \n"
"fmla v17.4s, v29.4s, v7.s[3] \n"
"fmla v18.4s, v29.4s, v8.s[1] \n"
"fmla v19.4s, v29.4s, v8.s[3] \n"
"fmla v20.4s, v29.4s, v9.s[1] \n"
"fmla v21.4s, v29.4s, v9.s[3] \n"
"fmla v22.4s, v29.4s, v10.s[1] \n"
"fmla v23.4s, v29.4s, v10.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r2
"fmla v16.4s, v30.4s, v7.s[2] \n"
"fmla v17.4s, v30.4s, v8.s[0] \n"
"fmla v18.4s, v30.4s, v8.s[2] \n"
"fmla v19.4s, v30.4s, v9.s[0] \n"
"fmla v20.4s, v30.4s, v9.s[2] \n"
"fmla v21.4s, v30.4s, v10.s[0] \n"
"fmla v22.4s, v30.4s, v10.s[2] \n"
"fmla v23.4s, v30.4s, v11.s[0] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.4s, v5.4s}, [%3] \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%4], #64 \n"// r3
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v10.4s, v11.4s}, [%4] \n"
"fmla v16.4s, v24.4s, v6.s[0] \n"
"fmla v17.4s, v24.4s, v6.s[2] \n"
"fmla v18.4s, v24.4s, v7.s[0] \n"
"fmla v19.4s, v24.4s, v7.s[2] \n"
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v8.s[2] \n"
"fmla v22.4s, v24.4s, v9.s[0] \n"
"fmla v23.4s, v24.4s, v9.s[2] \n"
"fmla v16.4s, v25.4s, v6.s[1] \n"
"fmla v17.4s, v25.4s, v6.s[3] \n"
"fmla v18.4s, v25.4s, v7.s[1] \n"
"fmla v19.4s, v25.4s, v7.s[3] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v8.s[3] \n"
"fmla v22.4s, v25.4s, v9.s[1] \n"
"fmla v23.4s, v25.4s, v9.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v6.s[2] \n"
"fmla v17.4s, v26.4s, v7.s[0] \n"
"fmla v18.4s, v26.4s, v7.s[2] \n"
"fmla v19.4s, v26.4s, v8.s[0] \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v9.s[0] \n"
"fmla v22.4s, v26.4s, v9.s[2] \n"
"fmla v23.4s, v26.4s, v10.s[0] \n"
"fmla v16.4s, v27.4s, v6.s[3] \n"
"fmla v17.4s, v27.4s, v7.s[1] \n"
"fmla v18.4s, v27.4s, v7.s[3] \n"
"fmla v19.4s, v27.4s, v8.s[1] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v9.s[1] \n"
"fmla v22.4s, v27.4s, v9.s[3] \n"
"fmla v23.4s, v27.4s, v10.s[1] \n"
"fmla v16.4s, v28.4s, v7.s[0] \n"
"fmla v17.4s, v28.4s, v7.s[2] \n"
"fmla v18.4s, v28.4s, v8.s[0] \n"
"fmla v19.4s, v28.4s, v8.s[2] \n"
"fmla v20.4s, v28.4s, v9.s[0] \n"
"fmla v21.4s, v28.4s, v9.s[2] \n"
"fmla v22.4s, v28.4s, v10.s[0] \n"
"fmla v23.4s, v28.4s, v10.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v7.s[1] \n"
"fmla v17.4s, v29.4s, v7.s[3] \n"
"fmla v18.4s, v29.4s, v8.s[1] \n"
"fmla v19.4s, v29.4s, v8.s[3] \n"
"fmla v20.4s, v29.4s, v9.s[1] \n"
"fmla v21.4s, v29.4s, v9.s[3] \n"
"fmla v22.4s, v29.4s, v10.s[1] \n"
"fmla v23.4s, v29.4s, v10.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r4
"fmla v16.4s, v30.4s, v7.s[2] \n"
"fmla v17.4s, v30.4s, v8.s[0] \n"
"fmla v18.4s, v30.4s, v8.s[2] \n"
"fmla v19.4s, v30.4s, v9.s[0] \n"
"fmla v20.4s, v30.4s, v9.s[2] \n"
"fmla v21.4s, v30.4s, v10.s[0] \n"
"fmla v22.4s, v30.4s, v10.s[2] \n"
"fmla v23.4s, v30.4s, v11.s[0] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v4.4s, v5.4s}, [%5] \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%6], #64 \n"// r5
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v10.4s, v11.4s}, [%6] \n"
"fmla v16.4s, v24.4s, v6.s[0] \n"
"fmla v17.4s, v24.4s, v6.s[2] \n"
"fmla v18.4s, v24.4s, v7.s[0] \n"
"fmla v19.4s, v24.4s, v7.s[2] \n"
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v8.s[2] \n"
"fmla v22.4s, v24.4s, v9.s[0] \n"
"fmla v23.4s, v24.4s, v9.s[2] \n"
"fmla v16.4s, v25.4s, v6.s[1] \n"
"fmla v17.4s, v25.4s, v6.s[3] \n"
"fmla v18.4s, v25.4s, v7.s[1] \n"
"fmla v19.4s, v25.4s, v7.s[3] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v8.s[3] \n"
"fmla v22.4s, v25.4s, v9.s[1] \n"
"fmla v23.4s, v25.4s, v9.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v6.s[2] \n"
"fmla v17.4s, v26.4s, v7.s[0] \n"
"fmla v18.4s, v26.4s, v7.s[2] \n"
"fmla v19.4s, v26.4s, v8.s[0] \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v9.s[0] \n"
"fmla v22.4s, v26.4s, v9.s[2] \n"
"fmla v23.4s, v26.4s, v10.s[0] \n"
"fmla v16.4s, v27.4s, v6.s[3] \n"
"fmla v17.4s, v27.4s, v7.s[1] \n"
"fmla v18.4s, v27.4s, v7.s[3] \n"
"fmla v19.4s, v27.4s, v8.s[1] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v9.s[1] \n"
"fmla v22.4s, v27.4s, v9.s[3] \n"
"fmla v23.4s, v27.4s, v10.s[1] \n"
"fmla v16.4s, v28.4s, v7.s[0] \n"
"fmla v17.4s, v28.4s, v7.s[2] \n"
"fmla v18.4s, v28.4s, v8.s[0] \n"
"fmla v19.4s, v28.4s, v8.s[2] \n"
"fmla v20.4s, v28.4s, v9.s[0] \n"
"fmla v21.4s, v28.4s, v9.s[2] \n"
"fmla v22.4s, v28.4s, v10.s[0] \n"
"fmla v23.4s, v28.4s, v10.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v7.s[1] \n"
"fmla v17.4s, v29.4s, v7.s[3] \n"
"fmla v18.4s, v29.4s, v8.s[1] \n"
"fmla v19.4s, v29.4s, v8.s[3] \n"
"fmla v20.4s, v29.4s, v9.s[1] \n"
"fmla v21.4s, v29.4s, v9.s[3] \n"
"fmla v22.4s, v29.4s, v10.s[1] \n"
"fmla v23.4s, v29.4s, v10.s[3] \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%7], #64 \n"// r6
"fmla v16.4s, v30.4s, v7.s[2] \n"
"fmla v17.4s, v30.4s, v8.s[0] \n"
"fmla v18.4s, v30.4s, v8.s[2] \n"
"fmla v19.4s, v30.4s, v9.s[0] \n"
"fmla v20.4s, v30.4s, v9.s[2] \n"
"fmla v21.4s, v30.4s, v10.s[0] \n"
"fmla v22.4s, v30.4s, v10.s[2] \n"
"fmla v23.4s, v30.4s, v11.s[0] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v4.4s, v5.4s}, [%7] \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[2] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[3] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"fmla v23.4s, v25.4s, v3.s[3] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[0] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[1] \n"
"fmla v22.4s, v27.4s, v3.s[3] \n"
"fmla v23.4s, v27.4s, v4.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v20.4s, v28.4s, v3.s[0] \n"
"fmla v21.4s, v28.4s, v3.s[2] \n"
"fmla v22.4s, v28.4s, v4.s[0] \n"
"fmla v23.4s, v28.4s, v4.s[2] \n"
"sub %0, %0, #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v20.4s, v29.4s, v3.s[1] \n"
"fmla v21.4s, v29.4s, v3.s[3] \n"
"fmla v22.4s, v29.4s, v4.s[1] \n"
"fmla v23.4s, v29.4s, v4.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"fmla v20.4s, v30.4s, v3.s[2] \n"
"fmla v21.4s, v30.4s, v4.s[0] \n"
"fmla v22.4s, v30.4s, v4.s[2] \n"
"fmla v23.4s, v30.4s, v5.s[0] \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
);
}
#endif // __aarch64__
for (; j+3<outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1] \n"// r0
"add %1, %1, #32 \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n"// r1
"add %2, %2, #32 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v24.4s, v5.s[0] \n"
"fmla v19.4s, v24.4s, v5.s[2] \n"
"fmla v16.4s, v25.4s, v4.s[1] \n"
"fmla v17.4s, v25.4s, v4.s[3] \n"
"fmla v18.4s, v25.4s, v5.s[1] \n"
"fmla v19.4s, v25.4s, v5.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"fmla v18.4s, v26.4s, v5.s[2] \n"
"fmla v19.4s, v26.4s, v6.s[0] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3] \n"// r2
"add %3, %3, #32 \n"
"fmla v16.4s, v27.4s, v4.s[3] \n"
"fmla v17.4s, v27.4s, v5.s[1] \n"
"fmla v18.4s, v27.4s, v5.s[3] \n"
"fmla v19.4s, v27.4s, v6.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"fmla v18.4s, v28.4s, v6.s[0] \n"
"fmla v19.4s, v28.4s, v6.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v5.s[1] \n"
"fmla v17.4s, v29.4s, v5.s[3] \n"
"fmla v18.4s, v29.4s, v6.s[1] \n"
"fmla v19.4s, v29.4s, v6.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"fmla v18.4s, v30.4s, v6.s[2] \n"
"fmla v19.4s, v30.4s, v7.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4] \n"// r3
"add %4, %4, #32 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v24.4s, v5.s[0] \n"
"fmla v19.4s, v24.4s, v5.s[2] \n"
"fmla v16.4s, v25.4s, v4.s[1] \n"
"fmla v17.4s, v25.4s, v4.s[3] \n"
"fmla v18.4s, v25.4s, v5.s[1] \n"
"fmla v19.4s, v25.4s, v5.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"fmla v18.4s, v26.4s, v5.s[2] \n"
"fmla v19.4s, v26.4s, v6.s[0] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5] \n"// r4
"add %5, %5, #32 \n"
"fmla v16.4s, v27.4s, v4.s[3] \n"
"fmla v17.4s, v27.4s, v5.s[1] \n"
"fmla v18.4s, v27.4s, v5.s[3] \n"
"fmla v19.4s, v27.4s, v6.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"fmla v18.4s, v28.4s, v6.s[0] \n"
"fmla v19.4s, v28.4s, v6.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v5.s[1] \n"
"fmla v17.4s, v29.4s, v5.s[3] \n"
"fmla v18.4s, v29.4s, v6.s[1] \n"
"fmla v19.4s, v29.4s, v6.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"fmla v18.4s, v30.4s, v6.s[2] \n"
"fmla v19.4s, v30.4s, v7.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6] \n"// r5
"add %6, %6, #32 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v24.4s, v5.s[0] \n"
"fmla v19.4s, v24.4s, v5.s[2] \n"
"fmla v16.4s, v25.4s, v4.s[1] \n"
"fmla v17.4s, v25.4s, v4.s[3] \n"
"fmla v18.4s, v25.4s, v5.s[1] \n"
"fmla v19.4s, v25.4s, v5.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"fmla v18.4s, v26.4s, v5.s[2] \n"
"fmla v19.4s, v26.4s, v6.s[0] \n"
"prfm pldl1keep, [%7, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%7] \n"// r6
"add %7, %7, #32 \n"
"fmla v16.4s, v27.4s, v4.s[3] \n"
"fmla v17.4s, v27.4s, v5.s[1] \n"
"fmla v18.4s, v27.4s, v5.s[3] \n"
"fmla v19.4s, v27.4s, v6.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"fmla v18.4s, v28.4s, v6.s[0] \n"
"fmla v19.4s, v28.4s, v6.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v5.s[1] \n"
"fmla v17.4s, v29.4s, v5.s[3] \n"
"fmla v18.4s, v29.4s, v6.s[1] \n"
"fmla v19.4s, v29.4s, v6.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"fmla v18.4s, v30.4s, v6.s[2] \n"
"fmla v19.4s, v30.4s, v7.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v0.s[0] \n"
"fmla v17.4s, v24.4s, v0.s[2] \n"
"fmla v18.4s, v24.4s, v1.s[0] \n"
"fmla v19.4s, v24.4s, v1.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v25.4s, v1.s[1] \n"
"fmla v19.4s, v25.4s, v1.s[3] \n"
"fmla v16.4s, v26.4s, v0.s[2] \n"
"fmla v17.4s, v26.4s, v1.s[0] \n"
"fmla v18.4s, v26.4s, v1.s[2] \n"
"fmla v19.4s, v26.4s, v2.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v27.4s, v1.s[3] \n"
"fmla v19.4s, v27.4s, v2.s[1] \n"
"fmla v16.4s, v28.4s, v1.s[0] \n"
"fmla v17.4s, v28.4s, v1.s[2] \n"
"fmla v18.4s, v28.4s, v2.s[0] \n"
"fmla v19.4s, v28.4s, v2.s[2] \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v29.4s, v2.s[1] \n"
"fmla v19.4s, v29.4s, v2.s[3] \n"
"fmla v16.4s, v30.4s, v1.s[2] \n"
"fmla v17.4s, v30.4s, v2.s[0] \n"
"fmla v18.4s, v30.4s, v2.s[2] \n"
"fmla v19.4s, v30.4s, v3.s[0] \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
);
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"// r0
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%1, #192] \n"
"vld1.f32 {d4-d6}, [%1] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2]! \n"// r1
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d4-d6}, [%2] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3]! \n"// r2
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d4-d6}, [%3] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4]! \n"// r3
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d4-d6}, [%4] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5]! \n"// r4
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d4-d6}, [%5] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d0-d3}, [%6]! \n"// r5
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d4-d6}, [%6] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d0-d3}, [%7]! \n"// r6
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q5, d2[0] \n"
"vmla.f32 q15, q5, d3[0] \n"
"pld [%7, #192] \n"
"vld1.f32 {d4-d6}, [%7] \n"
"vmla.f32 q12, q6, d0[1] \n"
"vmla.f32 q13, q6, d1[1] \n"
"vmla.f32 q14, q6, d2[1] \n"
"vmla.f32 q15, q6, d3[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q7, d3[0] \n"
"vmla.f32 q15, q7, d4[0] \n"
"vmla.f32 q12, q8, d1[1] \n"
"vmla.f32 q13, q8, d2[1] \n"
"vmla.f32 q14, q8, d3[1] \n"
"vmla.f32 q15, q8, d4[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q9, d4[0] \n"
"vmla.f32 q15, q9, d5[0] \n"
"vmla.f32 q12, q10, d2[1] \n"
"vmla.f32 q13, q10, d3[1] \n"
"vmla.f32 q14, q10, d4[1] \n"
"vmla.f32 q15, q10, d5[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d4[0] \n"
"vmla.f32 q14, q11, d5[0] \n"
"vmla.f32 q15, q11, d6[0] \n"
"sub %8, %8, #784 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v16.4s, v17.4s}, [%0] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n"// r0
"add %1, %1, #16 \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmul v18.4s, v24.4s, v0.s[0] \n"
"fmul v19.4s, v24.4s, v0.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%2] \n"// r1
"add %2, %2, #16 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v25.4s, v4.s[1] \n"
"fmla v19.4s, v25.4s, v4.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n"// r2
"add %3, %3, #16 \n"
"fmla v18.4s, v27.4s, v4.s[3] \n"
"fmla v19.4s, v27.4s, v5.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v5.s[1] \n"
"fmla v19.4s, v29.4s, v5.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v0.s[0] \n"
"fmla v19.4s, v24.4s, v0.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%4] \n"// r3
"add %4, %4, #16 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v25.4s, v4.s[1] \n"
"fmla v19.4s, v25.4s, v4.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%5] \n"// r4
"add %5, %5, #16 \n"
"fmla v18.4s, v27.4s, v4.s[3] \n"
"fmla v19.4s, v27.4s, v5.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v5.s[1] \n"
"fmla v19.4s, v29.4s, v5.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v0.s[0] \n"
"fmla v19.4s, v24.4s, v0.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%6] \n"// r5
"add %6, %6, #16 \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v24.4s, v4.s[2] \n"
"fmla v18.4s, v25.4s, v4.s[1] \n"
"fmla v19.4s, v25.4s, v4.s[3] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"fmla v17.4s, v26.4s, v5.s[0] \n"
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%7] \n"// r6
"add %7, %7, #16 \n"
"fmla v18.4s, v27.4s, v4.s[3] \n"
"fmla v19.4s, v27.4s, v5.s[1] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"fmla v17.4s, v28.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v5.s[1] \n"
"fmla v19.4s, v29.4s, v5.s[3] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"fmla v17.4s, v30.4s, v6.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v0.s[0] \n"
"fmla v19.4s, v24.4s, v0.s[2] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v25.4s, v0.s[3] \n"
"fmla v18.4s, v26.4s, v0.s[2] \n"
"fmla v19.4s, v26.4s, v1.s[0] \n"
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v27.4s, v1.s[1] \n"
"fmla v18.4s, v28.4s, v1.s[0] \n"
"fmla v19.4s, v28.4s, v1.s[2] \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v29.4s, v1.s[3] \n"
"fmla v18.4s, v30.4s, v1.s[2] \n"
"fmla v19.4s, v30.4s, v2.s[0] \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v2", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
);
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128] \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"// r0
"vld1.f32 {d8[0]}, [%1] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmul.f32 q12, q5, d0[0] \n"
"vmul.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2]! \n"// r1
"vld1.f32 {d9[0]}, [%2] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q14, q5, d4[0] \n"
"vmla.f32 q15, q5, d5[0] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q6, d5[1] \n"
"vmla.f32 q14, q7, d5[0] \n"
"vmla.f32 q15, q7, d6[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3]! \n"// r2
"vld1.f32 {d8[0]}, [%3] \n"
"vmla.f32 q12, q8, d5[1] \n"
"vmla.f32 q13, q8, d6[1] \n"
"vmla.f32 q14, q9, d6[0] \n"
"vmla.f32 q15, q9, d7[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d6[1] \n"
"vmla.f32 q13, q10, d7[1] \n"
"vmla.f32 q14, q11, d7[0] \n"
"vmla.f32 q15, q11, d9[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4]! \n"// r3
"vld1.f32 {d9[0]}, [%4] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q14, q5, d4[0] \n"
"vmla.f32 q15, q5, d5[0] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q6, d5[1] \n"
"vmla.f32 q14, q7, d5[0] \n"
"vmla.f32 q15, q7, d6[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5]! \n"// r4
"vld1.f32 {d8[0]}, [%5] \n"
"vmla.f32 q12, q8, d5[1] \n"
"vmla.f32 q13, q8, d6[1] \n"
"vmla.f32 q14, q9, d6[0] \n"
"vmla.f32 q15, q9, d7[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d6[1] \n"
"vmla.f32 q13, q10, d7[1] \n"
"vmla.f32 q14, q11, d7[0] \n"
"vmla.f32 q15, q11, d9[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6]! \n"// r5
"vld1.f32 {d9[0]}, [%6] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q14, q5, d4[0] \n"
"vmla.f32 q15, q5, d5[0] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q6, d5[1] \n"
"vmla.f32 q14, q7, d5[0] \n"
"vmla.f32 q15, q7, d6[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d0-d3}, [%7]! \n"// r6
"vld1.f32 {d8[0]}, [%7] \n"
"vmla.f32 q12, q8, d5[1] \n"
"vmla.f32 q13, q8, d6[1] \n"
"vmla.f32 q14, q9, d6[0] \n"
"vmla.f32 q15, q9, d7[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d10-d17} \n"
"vmla.f32 q12, q10, d6[1] \n"
"vmla.f32 q13, q10, d7[1] \n"
"vmla.f32 q14, q11, d7[0] \n"
"vmla.f32 q15, q11, d9[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d18-d23} \n"
"vmla.f32 q12, q5, d0[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q6, d0[1] \n"
"vmla.f32 q15, q6, d1[1] \n"
"sub %1, %1, #16 \n"
"sub %2, %2, #16 \n"
"vmla.f32 q12, q7, d1[0] \n"
"vmla.f32 q13, q7, d2[0] \n"
"vmla.f32 q14, q8, d1[1] \n"
"vmla.f32 q15, q8, d2[1] \n"
"sub %8, %8, #784 \n"
"vmla.f32 q12, q9, d2[0] \n"
"vmla.f32 q13, q9, d3[0] \n"
"vmla.f32 q14, q10, d2[1] \n"
"vmla.f32 q15, q10, d3[1] \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"vmla.f32 q12, q11, d3[0] \n"
"vmla.f32 q13, q11, d8[0] \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
"vadd.f32 q14, q14, q12 \n"
"vadd.f32 q15, q15, q13 \n"
"sub %7, %7, #16 \n"
"vst1.f32 {d28-d31}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; j<outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1] \n"// r0
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmul v17.4s, v24.4s, v0.s[0] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmul v18.4s, v25.4s, v0.s[1] \n"
"fmul v19.4s, v26.4s, v0.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v4.4s, v5.4s}, [%2] \n"// r1
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v28.4s, v1.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v1.s[1] \n"
"fmla v19.4s, v30.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v25.4s, v4.s[1] \n"
"fmla v18.4s, v26.4s, v4.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3] \n"// r2
"fmla v19.4s, v27.4s, v4.s[3] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v17.4s, v29.4s, v5.s[1] \n"
"fmla v18.4s, v30.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v19.4s, v24.4s, v0.s[0] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v26.4s, v0.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4] \n"// r3
"fmla v18.4s, v27.4s, v0.s[3] \n"
"fmla v19.4s, v28.4s, v1.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v30.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v18.4s, v24.4s, v4.s[0] \n"
"fmla v19.4s, v25.4s, v4.s[1] \n"
"fmla v16.4s, v26.4s, v4.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5] \n"// r4
"fmla v17.4s, v27.4s, v4.s[3] \n"
"fmla v18.4s, v28.4s, v5.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v19.4s, v29.4s, v5.s[1] \n"
"fmla v16.4s, v30.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v17.4s, v24.4s, v0.s[0] \n"
"fmla v18.4s, v25.4s, v0.s[1] \n"
"fmla v19.4s, v26.4s, v0.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4s, v5.4s}, [%6] \n"// r5
"fmla v16.4s, v27.4s, v0.s[3] \n"
"fmla v17.4s, v28.4s, v1.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v18.4s, v29.4s, v1.s[1] \n"
"fmla v19.4s, v30.4s, v1.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v16.4s, v24.4s, v4.s[0] \n"
"fmla v17.4s, v25.4s, v4.s[1] \n"
"fmla v18.4s, v26.4s, v4.s[2] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v0.4s, v1.4s}, [%7] \n"// r6
"fmla v19.4s, v27.4s, v4.s[3] \n"
"fmla v16.4s, v28.4s, v5.s[0] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%8], #64 \n"
"fmla v17.4s, v29.4s, v5.s[1] \n"
"fmla v18.4s, v30.4s, v5.s[2] \n"
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%8], #48 \n"
"fmla v19.4s, v24.4s, v0.s[0] \n"
"fmla v16.4s, v25.4s, v0.s[1] \n"
"fmla v17.4s, v26.4s, v0.s[2] \n"
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"fmla v18.4s, v27.4s, v0.s[3] \n"
"fmla v19.4s, v28.4s, v1.s[0] \n"
"fmla v16.4s, v29.4s, v1.s[1] \n"
"fmla v17.4s, v30.4s, v1.s[2] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"fadd v18.4s, v18.4s, v19.4s \n"
"add %5, %5, #8 \n"
"fadd v16.4s, v16.4s, v17.4s \n"
"add %6, %6, #8 \n"
"add %7, %7, #8 \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"sub %8, %8, #784 \n"
"st1 {v16.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "v0", "v1", "v4", "v5", "v16", "v17", "v18", "v19", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
);
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d8-d9}, [%0 :128] \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1] \n"// r0
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmul.f32 q5, q8, d0[0] \n"
"vmul.f32 q6, q9, d0[1] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmul.f32 q7, q10, d1[0] \n"
"vmla.f32 q4, q11, d1[1] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2] \n"// r1
"vmla.f32 q5, q12, d2[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q6, q13, d2[1] \n"
"vmla.f32 q7, q14, d3[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q4, q8, d4[0] \n"
"vmla.f32 q5, q9, d4[1] \n"
"vmla.f32 q6, q10, d5[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3] \n"// r2
"vmla.f32 q7, q11, d5[1] \n"
"vmla.f32 q4, q12, d6[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q5, q13, d6[1] \n"
"vmla.f32 q6, q14, d7[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q7, q8, d0[0] \n"
"vmla.f32 q4, q9, d0[1] \n"
"vmla.f32 q5, q10, d1[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4] \n"// r3
"vmla.f32 q6, q11, d1[1] \n"
"vmla.f32 q7, q12, d2[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q4, q13, d2[1] \n"
"vmla.f32 q5, q14, d3[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q6, q8, d4[0] \n"
"vmla.f32 q7, q9, d4[1] \n"
"vmla.f32 q4, q10, d5[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5] \n"// r4
"vmla.f32 q5, q11, d5[1] \n"
"vmla.f32 q6, q12, d6[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q7, q13, d6[1] \n"
"vmla.f32 q4, q14, d7[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q5, q8, d0[0] \n"
"vmla.f32 q6, q9, d0[1] \n"
"vmla.f32 q7, q10, d1[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6] \n"// r5
"vmla.f32 q4, q11, d1[1] \n"
"vmla.f32 q5, q12, d2[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q6, q13, d2[1] \n"
"vmla.f32 q7, q14, d3[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q4, q8, d4[0] \n"
"vmla.f32 q5, q9, d4[1] \n"
"vmla.f32 q6, q10, d5[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d0-d3}, [%7] \n"// r6
"vmla.f32 q7, q11, d5[1] \n"
"vmla.f32 q4, q12, d6[0] \n"
"pld [%8, #512] \n"
"vldm %8!, {d16-d23} \n"
"vmla.f32 q5, q13, d6[1] \n"
"vmla.f32 q6, q14, d7[0] \n"
"pld [%8, #384] \n"
"vldm %8!, {d24-d29} \n"
"vmla.f32 q7, q8, d0[0] \n"
"vmla.f32 q4, q9, d0[1] \n"
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"vmla.f32 q5, q10, d1[0] \n"
"vmla.f32 q6, q11, d1[1] \n"
"sub %8, %8, #784 \n"
"vmla.f32 q7, q12, d2[0] \n"
"vmla.f32 q4, q13, d2[1] \n"
"vmla.f32 q5, q14, d3[0] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"vadd.f32 q6, q6, q7 \n"
"add %5, %5, #8 \n"
"vadd.f32 q4, q4, q5 \n"
"add %6, %6, #8 \n"
"vadd.f32 q4, q4, q6 \n"
"add %7, %7, #8 \n"
"vst1.f32 {d8-d9}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(r5), // %6
"=r"(r6), // %7
"=r"(kptr) // %8
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(r5),
"7"(r6),
"8"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
);
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/color-private.h"
#include "magick/cache.h"
#include "magick/draw.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/transform.h"
#include "magick/signature-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather than direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if ! FILTER_DIRECT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
/*
  Persistent state for EWA (Elliptical Weighted Average) resampling of one
  image.  Created by AcquireResampleFilter(), reused across many pixel
  lookups, and released by DestroyResampleFilter().
*/
struct _ResampleFilter
{
CacheView
*view;  /* virtual cache view opened on 'image'; all pixel reads go through it */
Image
*image;  /* reference taken via ReferenceImage(); dropped on destroy */
ExceptionInfo
*exception;  /* sink for errors raised during pixel access */
MagickBooleanType
debug;  /* cached IsEventLogging() result at acquire time */
/* Information about image being resampled */
ssize_t
image_area;  /* columns*rows of the source image */
InterpolatePixelMethod
interpolate;  /* direct interpolation method (used when do_interpolate is set) */
VirtualPixelMethod
virtual_pixel;  /* how off-canvas pixel lookups are satisfied */
FilterTypes
filter;  /* resize filter selected for the weighted average */
/* processing settings needed */
MagickBooleanType
limit_reached,  /* NOTE(review): appears to flag a clamped/aborted ellipse — confirm against ScaleResampleFilter */
do_interpolate,  /* when true, ResamplePixelColor() short-circuits to point interpolation */
average_defined;  /* lazy-init guard for average_pixel */
MagickPixelPacket
average_pixel;  /* cached whole-image average color (computed on demand) */
/* current elliptical area being resampled around center point */
double
A, B, C,  /* coefficients of the sampling ellipse (set by the scaling step) */
Vlimit, Ulimit, Uwidth, slope;  /* scan-area bounds derived from the ellipse */
#if FILTER_LUT
/* LUT of weights for filtered average in elliptical area */
double
filter_lut[WLUT_WIDTH];
#else
/* Use a Direct call to the filter functions */
ResizeFilter
*filter_def;  /* owned; freed in DestroyResampleFilter() */
double
F;  /* filter normalization/scale factor for direct calls */
#endif
/* the practical working support of the filter */
double
support;
size_t
signature;  /* MagickCoreSignature while valid; inverted on destroy */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs do to a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireVirtualCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter, 1.0);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
ExceptionInfo *exception)
{
ResampleFilter
*filter_info;

/* sanity-check arguments and honor image-level trace logging */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* allocate a zeroed state block; allocation failure is fatal */
filter_info=(ResampleFilter *) AcquireQuantumMemory(1,sizeof(*filter_info));
if (filter_info == (ResampleFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(filter_info,0,sizeof(*filter_info));
/* take our own image reference and open a virtual pixel cache view,
   so callers need not manage a view themselves */
filter_info->exception=exception;
filter_info->image=ReferenceImage((Image *) image);
filter_info->view=AcquireVirtualCacheView(filter_info->image,exception);
filter_info->debug=IsEventLogging();
filter_info->signature=MagickCoreSignature;
filter_info->image_area=(ssize_t) (image->columns*image->rows);
filter_info->average_defined=MagickFalse;
/* seed filter, interpolation, and virtual-pixel handling from the image */
SetResampleFilter(filter_info, image->filter, image->blur);
(void) SetResampleFilterInterpolateMethod(filter_info,image->interpolate);
(void) SetResampleFilterVirtualPixelMethod(filter_info,
GetImageVirtualPixelMethod(image));
return(filter_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
/*
  Tear down a ResampleFilter from AcquireResampleFilter(): release the cache
  view, drop the image reference, free direct-filter state if present, and
  poison the signature.  Always returns NULL so callers can reassign:
    resample_filter=DestroyResampleFilter(resample_filter);
*/
MagickExport ResampleFilter *DestroyResampleFilter(
ResampleFilter *resample_filter)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
/* view must go before the image it was opened on */
resample_filter->view=DestroyCacheView(resample_filter->view);
resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
/* direct-call mode owns a ResizeFilter that must be released as well */
resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
/* invert the signature to catch use-after-destroy via the asserts above */
resample_filter->signature=(~MagickCoreSignature);
resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,MagickPixelPacket *pixel)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% The distortion-transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  MagickPixelPacket *pixel)
{
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const PixelPacket *pixels;
  register const IndexPacket *indexes;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetMagickPixelPacket(resample_filter->image,pixel); */
  /* A point filter (do_interpolate) bypasses EWA entirely and returns a
     direct interpolated lookup at u0,v0. */
  if ( resample_filter->do_interpolate ) {
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does resample area Miss the image Proper?
    If and that area a simple solid color - then simply return that color!
    This saves a lot of calculation when resampling outside the bounds of
    the source image.

    However it probably should be expanded to image bounds plus the filters
    scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case ConstantVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      /* a solid-color VP method: a miss on either axis means the whole
         ellipse lies in the constant color region */
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* edge-extended pixels: only the four outside corners are solid */
      if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if ( v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if ( u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* dither extends the image bounds by a 32 pixel margin */
      if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color
     * just return a single lookup color.
     *
     * Should this return the users requested interpolated color?
     */
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

  /*
    When Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /* This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood,
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there a better way? */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate a average color of the WHOLE image */
        /* computed lazily and cached in resample_filter->average_pixel */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image */
          /* a 1x1 box-filter resize yields the mean of all pixels */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,
            &average_image->exception);
          pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const PixelPacket *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
          SetMagickPixelPacket(resample_filter->image,pixels,indexes,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is a alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                resample_filter->average_pixel.opacity));
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                resample_filter->image->background_color.opacity));
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.opacity +=
                resample_filter->image->background_color.opacity;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.opacity /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
  if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0. This area is bounding by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels, within the scaled ellipse,
    bound by a Parallelogram fitted to the ellipse.
    Q is the ellipse quadratic evaluated incrementally: DQ and DDQ are
    its first and (constant) second finite differences along a scanline.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const PixelPacket *) NULL)
      return(MagickFalse);
    indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
      weight = 0;
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q));    /* a SquareRoot!  Arrggghhhhh... */
#endif
        /* opacity is accumulated un-multiplied; colors are accumulated
           alpha-weighted (associated alpha) */
        if (pixel->matte != MagickFalse)
          pixel->opacity += weight*pixels->opacity;
        divisor_m += weight;

        if (pixel->matte != MagickFalse)
          weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
        pixel->red += weight*pixels->red;
        pixel->green += weight*pixels->green;
        pixel->blue += weight*pixels->blue;
        if (pixel->colorspace == CMYKColorspace)
          pixel->index += weight*(*indexes);
        divisor_c += weight;

        hit++;
#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels++;
      indexes++;
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling,
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finalize results of resampling: divide out the accumulated weights
    (opacity by divisor_m, alpha-weighted colors by divisor_c).
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->matte != MagickFalse)
    pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
  divisor_c = 1.0/divisor_c;
  pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
  return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clampling up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustaffson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This not using
% a orthogonal scaling, but two distorted scaling vectors, to allow the
% generation of a angled ellipse.
%
% As only two derivative scaling vectors are used the center of the ellipse
% must be the center of the lookup. That is any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivitives of the
% distortion function, or the partial derivitives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% Producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that the scaling vectors differ from the argument order. Argument
% order is the general order in which the derivatives are extracted from the
% distortion equations, and not the scaling vectors. As such the middle two
% values may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The derivatives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order derivatives
% are usually determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf;   duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /* Find Ellipse Coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
        du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients
     However when magnifying images, the scaling vectors will be small
     resulting in a ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag,
           minor_mag,
           major_x,
           major_y,
           minor_x,
           minor_y;

  /* SVD-based clamp: guarantees the ellipse contains the unit disk */
  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
              &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag;  major_y *= major_mag;
  minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf;  minor_x=%lf; minor_y=%lf;\n",
    major_x, major_y, minor_x, minor_y);
#endif
  /* implicit ellipse coefficients rebuilt from the clamped axes;
     F is the squared area factor (product of the half-axis lengths) */
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging.  No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.

    NOTE: This method produces a very blury result at near unity scale while
    producing perfect results for strong minitification and magnifications.

    However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.

     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;
    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major= MagickMaximumValue;
    else
      Major= sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf   Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling since resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image is needed to get a reasonable
     result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support)
     Simpler to just multiply it by the support twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
       > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter, not as a
% two-pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterTypes filter,const double blur)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
% o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  /* EWA resampling is the default mode; direct interpolation is enabled
     only for PointFilter, or as a fallback when no resize filter can be
     acquired (see below). */
  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    /* Could not build the requested filter: warn and degrade gracefully
       to plain point interpolation rather than failing outright. */
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
   */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function.
     The table is indexed by squared normalized radius (hence the sqrt()
     when converting a table index back into a distance). */
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif
  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always
    take place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /*
    This is old code kept as a reference only.  Basically it generates
    a Gaussian bell curve, with sigma = 0.5 if the support is 2.0
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA Gaussian lookup would use  exp(Q*ALPHA)
    where  Q = distance squared from 0.0 (center) to 1.0 (edge)
    and    ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by  ALPHA*4/1024 and any blur factor squared.
    It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,
           "resample:verbose")) )
      {
        register int
          Q;
        double
          r_scale;

        /* Debug output of the filter weighting LUT
          Gnuplot the LUT data, the x scale index has been adjusted
            plot [0:2][-.2:1] "lut.dat" with lines
          The filter values should be normalized for comparison
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
               WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
               resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");

        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
              GetMagickPrecision(),sqrt((double)Q)*r_scale,
              GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  /*
    Record the pixel interpolation method this resample filter should use
    whenever EWA resampling is not applied.  Always reports success.
  */
  const Image
    *image;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  image=resample_filter->image;
  assert(image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  /*
    Store the virtual pixel method on the resample filter; any defined
    method is also pushed down to the underlying cache view so subsequent
    pixel lookups honour it.  Always reports success.
  */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->virtual_pixel=method;
  if (method == UndefinedVirtualPixelMethod)
    return(MagickTrue);   /* nothing to propagate to the cache view */
  (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
|
convolutiondepthwise_5x5_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
int i = 0;
#if __aarch64__
for (; i+1 < outh; i+=2)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
float32x4_t _sum00 = _bias0;
float32x4_t _sum01 = _bias0;
float32x4_t _sum02 = _bias0;
float32x4_t _sum03 = _bias0;
float32x4_t _sum10 = _bias0;
float32x4_t _sum11 = _bias0;
float32x4_t _sum12 = _bias0;
float32x4_t _sum13 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _r07 = vld1q_f32(r0+28);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum00 = vmlaq_f32(_sum00, _k00, _r00);
_sum00 = vmlaq_f32(_sum00, _k01, _r01);
_sum00 = vmlaq_f32(_sum00, _k02, _r02);
_sum00 = vmlaq_f32(_sum00, _k03, _r03);
_sum00 = vmlaq_f32(_sum00, _k04, _r04);
_sum01 = vmlaq_f32(_sum01, _k00, _r01);
_sum01 = vmlaq_f32(_sum01, _k01, _r02);
_sum01 = vmlaq_f32(_sum01, _k02, _r03);
_sum01 = vmlaq_f32(_sum01, _k03, _r04);
_sum01 = vmlaq_f32(_sum01, _k04, _r05);
_sum02 = vmlaq_f32(_sum02, _k00, _r02);
_sum02 = vmlaq_f32(_sum02, _k01, _r03);
_sum02 = vmlaq_f32(_sum02, _k02, _r04);
_sum02 = vmlaq_f32(_sum02, _k03, _r05);
_sum02 = vmlaq_f32(_sum02, _k04, _r06);
_sum03 = vmlaq_f32(_sum03, _k00, _r03);
_sum03 = vmlaq_f32(_sum03, _k01, _r04);
_sum03 = vmlaq_f32(_sum03, _k02, _r05);
_sum03 = vmlaq_f32(_sum03, _k03, _r06);
_sum03 = vmlaq_f32(_sum03, _k04, _r07);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _r17 = vld1q_f32(r1+28);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k00, _r10);
_sum10 = vmlaq_f32(_sum10, _k01, _r11);
_sum10 = vmlaq_f32(_sum10, _k02, _r12);
_sum10 = vmlaq_f32(_sum10, _k03, _r13);
_sum10 = vmlaq_f32(_sum10, _k04, _r14);
_sum11 = vmlaq_f32(_sum11, _k00, _r11);
_sum11 = vmlaq_f32(_sum11, _k01, _r12);
_sum11 = vmlaq_f32(_sum11, _k02, _r13);
_sum11 = vmlaq_f32(_sum11, _k03, _r14);
_sum11 = vmlaq_f32(_sum11, _k04, _r15);
_sum12 = vmlaq_f32(_sum12, _k00, _r12);
_sum12 = vmlaq_f32(_sum12, _k01, _r13);
_sum12 = vmlaq_f32(_sum12, _k02, _r14);
_sum12 = vmlaq_f32(_sum12, _k03, _r15);
_sum12 = vmlaq_f32(_sum12, _k04, _r16);
_sum13 = vmlaq_f32(_sum13, _k00, _r13);
_sum13 = vmlaq_f32(_sum13, _k01, _r14);
_sum13 = vmlaq_f32(_sum13, _k02, _r15);
_sum13 = vmlaq_f32(_sum13, _k03, _r16);
_sum13 = vmlaq_f32(_sum13, _k04, _r17);
_sum00 = vmlaq_f32(_sum00, _k10, _r10);
_sum00 = vmlaq_f32(_sum00, _k11, _r11);
_sum00 = vmlaq_f32(_sum00, _k12, _r12);
_sum00 = vmlaq_f32(_sum00, _k13, _r13);
_sum00 = vmlaq_f32(_sum00, _k14, _r14);
_sum01 = vmlaq_f32(_sum01, _k10, _r11);
_sum01 = vmlaq_f32(_sum01, _k11, _r12);
_sum01 = vmlaq_f32(_sum01, _k12, _r13);
_sum01 = vmlaq_f32(_sum01, _k13, _r14);
_sum01 = vmlaq_f32(_sum01, _k14, _r15);
_sum02 = vmlaq_f32(_sum02, _k10, _r12);
_sum02 = vmlaq_f32(_sum02, _k11, _r13);
_sum02 = vmlaq_f32(_sum02, _k12, _r14);
_sum02 = vmlaq_f32(_sum02, _k13, _r15);
_sum02 = vmlaq_f32(_sum02, _k14, _r16);
_sum03 = vmlaq_f32(_sum03, _k10, _r13);
_sum03 = vmlaq_f32(_sum03, _k11, _r14);
_sum03 = vmlaq_f32(_sum03, _k12, _r15);
_sum03 = vmlaq_f32(_sum03, _k13, _r16);
_sum03 = vmlaq_f32(_sum03, _k14, _r17);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _r27 = vld1q_f32(r2+28);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k10, _r20);
_sum10 = vmlaq_f32(_sum10, _k11, _r21);
_sum10 = vmlaq_f32(_sum10, _k12, _r22);
_sum10 = vmlaq_f32(_sum10, _k13, _r23);
_sum10 = vmlaq_f32(_sum10, _k14, _r24);
_sum11 = vmlaq_f32(_sum11, _k10, _r21);
_sum11 = vmlaq_f32(_sum11, _k11, _r22);
_sum11 = vmlaq_f32(_sum11, _k12, _r23);
_sum11 = vmlaq_f32(_sum11, _k13, _r24);
_sum11 = vmlaq_f32(_sum11, _k14, _r25);
_sum12 = vmlaq_f32(_sum12, _k10, _r22);
_sum12 = vmlaq_f32(_sum12, _k11, _r23);
_sum12 = vmlaq_f32(_sum12, _k12, _r24);
_sum12 = vmlaq_f32(_sum12, _k13, _r25);
_sum12 = vmlaq_f32(_sum12, _k14, _r26);
_sum13 = vmlaq_f32(_sum13, _k10, _r23);
_sum13 = vmlaq_f32(_sum13, _k11, _r24);
_sum13 = vmlaq_f32(_sum13, _k12, _r25);
_sum13 = vmlaq_f32(_sum13, _k13, _r26);
_sum13 = vmlaq_f32(_sum13, _k14, _r27);
_sum00 = vmlaq_f32(_sum00, _k20, _r20);
_sum00 = vmlaq_f32(_sum00, _k21, _r21);
_sum00 = vmlaq_f32(_sum00, _k22, _r22);
_sum00 = vmlaq_f32(_sum00, _k23, _r23);
_sum00 = vmlaq_f32(_sum00, _k24, _r24);
_sum01 = vmlaq_f32(_sum01, _k20, _r21);
_sum01 = vmlaq_f32(_sum01, _k21, _r22);
_sum01 = vmlaq_f32(_sum01, _k22, _r23);
_sum01 = vmlaq_f32(_sum01, _k23, _r24);
_sum01 = vmlaq_f32(_sum01, _k24, _r25);
_sum02 = vmlaq_f32(_sum02, _k20, _r22);
_sum02 = vmlaq_f32(_sum02, _k21, _r23);
_sum02 = vmlaq_f32(_sum02, _k22, _r24);
_sum02 = vmlaq_f32(_sum02, _k23, _r25);
_sum02 = vmlaq_f32(_sum02, _k24, _r26);
_sum03 = vmlaq_f32(_sum03, _k20, _r23);
_sum03 = vmlaq_f32(_sum03, _k21, _r24);
_sum03 = vmlaq_f32(_sum03, _k22, _r25);
_sum03 = vmlaq_f32(_sum03, _k23, _r26);
_sum03 = vmlaq_f32(_sum03, _k24, _r27);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _r37 = vld1q_f32(r3+28);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k20, _r30);
_sum10 = vmlaq_f32(_sum10, _k21, _r31);
_sum10 = vmlaq_f32(_sum10, _k22, _r32);
_sum10 = vmlaq_f32(_sum10, _k23, _r33);
_sum10 = vmlaq_f32(_sum10, _k24, _r34);
_sum11 = vmlaq_f32(_sum11, _k20, _r31);
_sum11 = vmlaq_f32(_sum11, _k21, _r32);
_sum11 = vmlaq_f32(_sum11, _k22, _r33);
_sum11 = vmlaq_f32(_sum11, _k23, _r34);
_sum11 = vmlaq_f32(_sum11, _k24, _r35);
_sum12 = vmlaq_f32(_sum12, _k20, _r32);
_sum12 = vmlaq_f32(_sum12, _k21, _r33);
_sum12 = vmlaq_f32(_sum12, _k22, _r34);
_sum12 = vmlaq_f32(_sum12, _k23, _r35);
_sum12 = vmlaq_f32(_sum12, _k24, _r36);
_sum13 = vmlaq_f32(_sum13, _k20, _r33);
_sum13 = vmlaq_f32(_sum13, _k21, _r34);
_sum13 = vmlaq_f32(_sum13, _k22, _r35);
_sum13 = vmlaq_f32(_sum13, _k23, _r36);
_sum13 = vmlaq_f32(_sum13, _k24, _r37);
_sum00 = vmlaq_f32(_sum00, _k30, _r30);
_sum00 = vmlaq_f32(_sum00, _k31, _r31);
_sum00 = vmlaq_f32(_sum00, _k32, _r32);
_sum00 = vmlaq_f32(_sum00, _k33, _r33);
_sum00 = vmlaq_f32(_sum00, _k34, _r34);
_sum01 = vmlaq_f32(_sum01, _k30, _r31);
_sum01 = vmlaq_f32(_sum01, _k31, _r32);
_sum01 = vmlaq_f32(_sum01, _k32, _r33);
_sum01 = vmlaq_f32(_sum01, _k33, _r34);
_sum01 = vmlaq_f32(_sum01, _k34, _r35);
_sum02 = vmlaq_f32(_sum02, _k30, _r32);
_sum02 = vmlaq_f32(_sum02, _k31, _r33);
_sum02 = vmlaq_f32(_sum02, _k32, _r34);
_sum02 = vmlaq_f32(_sum02, _k33, _r35);
_sum02 = vmlaq_f32(_sum02, _k34, _r36);
_sum03 = vmlaq_f32(_sum03, _k30, _r33);
_sum03 = vmlaq_f32(_sum03, _k31, _r34);
_sum03 = vmlaq_f32(_sum03, _k32, _r35);
_sum03 = vmlaq_f32(_sum03, _k33, _r36);
_sum03 = vmlaq_f32(_sum03, _k34, _r37);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _r47 = vld1q_f32(r4+28);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum10 = vmlaq_f32(_sum10, _k30, _r40);
_sum10 = vmlaq_f32(_sum10, _k31, _r41);
_sum10 = vmlaq_f32(_sum10, _k32, _r42);
_sum10 = vmlaq_f32(_sum10, _k33, _r43);
_sum10 = vmlaq_f32(_sum10, _k34, _r44);
_sum11 = vmlaq_f32(_sum11, _k30, _r41);
_sum11 = vmlaq_f32(_sum11, _k31, _r42);
_sum11 = vmlaq_f32(_sum11, _k32, _r43);
_sum11 = vmlaq_f32(_sum11, _k33, _r44);
_sum11 = vmlaq_f32(_sum11, _k34, _r45);
_sum12 = vmlaq_f32(_sum12, _k30, _r42);
_sum12 = vmlaq_f32(_sum12, _k31, _r43);
_sum12 = vmlaq_f32(_sum12, _k32, _r44);
_sum12 = vmlaq_f32(_sum12, _k33, _r45);
_sum12 = vmlaq_f32(_sum12, _k34, _r46);
_sum13 = vmlaq_f32(_sum13, _k30, _r43);
_sum13 = vmlaq_f32(_sum13, _k31, _r44);
_sum13 = vmlaq_f32(_sum13, _k32, _r45);
_sum13 = vmlaq_f32(_sum13, _k33, _r46);
_sum13 = vmlaq_f32(_sum13, _k34, _r47);
_sum00 = vmlaq_f32(_sum00, _k40, _r40);
_sum00 = vmlaq_f32(_sum00, _k41, _r41);
_sum00 = vmlaq_f32(_sum00, _k42, _r42);
_sum00 = vmlaq_f32(_sum00, _k43, _r43);
_sum00 = vmlaq_f32(_sum00, _k44, _r44);
_sum01 = vmlaq_f32(_sum01, _k40, _r41);
_sum01 = vmlaq_f32(_sum01, _k41, _r42);
_sum01 = vmlaq_f32(_sum01, _k42, _r43);
_sum01 = vmlaq_f32(_sum01, _k43, _r44);
_sum01 = vmlaq_f32(_sum01, _k44, _r45);
_sum02 = vmlaq_f32(_sum02, _k40, _r42);
_sum02 = vmlaq_f32(_sum02, _k41, _r43);
_sum02 = vmlaq_f32(_sum02, _k42, _r44);
_sum02 = vmlaq_f32(_sum02, _k43, _r45);
_sum02 = vmlaq_f32(_sum02, _k44, _r46);
_sum03 = vmlaq_f32(_sum03, _k40, _r43);
_sum03 = vmlaq_f32(_sum03, _k41, _r44);
_sum03 = vmlaq_f32(_sum03, _k42, _r45);
_sum03 = vmlaq_f32(_sum03, _k43, _r46);
_sum03 = vmlaq_f32(_sum03, _k44, _r47);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5+4);
float32x4_t _r52 = vld1q_f32(r5+8);
float32x4_t _r53 = vld1q_f32(r5+12);
float32x4_t _r54 = vld1q_f32(r5+16);
float32x4_t _r55 = vld1q_f32(r5+20);
float32x4_t _r56 = vld1q_f32(r5+24);
float32x4_t _r57 = vld1q_f32(r5+28);
_sum10 = vmlaq_f32(_sum10, _k40, _r50);
_sum10 = vmlaq_f32(_sum10, _k41, _r51);
_sum10 = vmlaq_f32(_sum10, _k42, _r52);
_sum10 = vmlaq_f32(_sum10, _k43, _r53);
_sum10 = vmlaq_f32(_sum10, _k44, _r54);
_sum11 = vmlaq_f32(_sum11, _k40, _r51);
_sum11 = vmlaq_f32(_sum11, _k41, _r52);
_sum11 = vmlaq_f32(_sum11, _k42, _r53);
_sum11 = vmlaq_f32(_sum11, _k43, _r54);
_sum11 = vmlaq_f32(_sum11, _k44, _r55);
_sum12 = vmlaq_f32(_sum12, _k40, _r52);
_sum12 = vmlaq_f32(_sum12, _k41, _r53);
_sum12 = vmlaq_f32(_sum12, _k42, _r54);
_sum12 = vmlaq_f32(_sum12, _k43, _r55);
_sum12 = vmlaq_f32(_sum12, _k44, _r56);
_sum13 = vmlaq_f32(_sum13, _k40, _r53);
_sum13 = vmlaq_f32(_sum13, _k41, _r54);
_sum13 = vmlaq_f32(_sum13, _k42, _r55);
_sum13 = vmlaq_f32(_sum13, _k43, _r56);
_sum13 = vmlaq_f32(_sum13, _k44, _r57);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0+4, _sum01);
vst1q_f32(outptr0+8, _sum02);
vst1q_f32(outptr0+12, _sum03);
vst1q_f32(outptr1, _sum10);
vst1q_f32(outptr1+4, _sum11);
vst1q_f32(outptr1+8, _sum12);
vst1q_f32(outptr1+12, _sum13);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
r5 += 16;
outptr0 += 16;
outptr1 += 16;
}
for (; j+1 < outw; j+=2)
{
float32x4_t _sum00 = _bias0;
float32x4_t _sum01 = _bias0;
float32x4_t _sum10 = _bias0;
float32x4_t _sum11 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum00 = vmlaq_f32(_sum00, _k00, _r00);
_sum00 = vmlaq_f32(_sum00, _k01, _r01);
_sum00 = vmlaq_f32(_sum00, _k02, _r02);
_sum00 = vmlaq_f32(_sum00, _k03, _r03);
_sum00 = vmlaq_f32(_sum00, _k04, _r04);
_sum01 = vmlaq_f32(_sum01, _k00, _r01);
_sum01 = vmlaq_f32(_sum01, _k01, _r02);
_sum01 = vmlaq_f32(_sum01, _k02, _r03);
_sum01 = vmlaq_f32(_sum01, _k03, _r04);
_sum01 = vmlaq_f32(_sum01, _k04, _r05);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k00, _r10);
_sum10 = vmlaq_f32(_sum10, _k01, _r11);
_sum10 = vmlaq_f32(_sum10, _k02, _r12);
_sum10 = vmlaq_f32(_sum10, _k03, _r13);
_sum10 = vmlaq_f32(_sum10, _k04, _r14);
_sum11 = vmlaq_f32(_sum11, _k00, _r11);
_sum11 = vmlaq_f32(_sum11, _k01, _r12);
_sum11 = vmlaq_f32(_sum11, _k02, _r13);
_sum11 = vmlaq_f32(_sum11, _k03, _r14);
_sum11 = vmlaq_f32(_sum11, _k04, _r15);
_sum00 = vmlaq_f32(_sum00, _k10, _r10);
_sum00 = vmlaq_f32(_sum00, _k11, _r11);
_sum00 = vmlaq_f32(_sum00, _k12, _r12);
_sum00 = vmlaq_f32(_sum00, _k13, _r13);
_sum00 = vmlaq_f32(_sum00, _k14, _r14);
_sum01 = vmlaq_f32(_sum01, _k10, _r11);
_sum01 = vmlaq_f32(_sum01, _k11, _r12);
_sum01 = vmlaq_f32(_sum01, _k12, _r13);
_sum01 = vmlaq_f32(_sum01, _k13, _r14);
_sum01 = vmlaq_f32(_sum01, _k14, _r15);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k10, _r20);
_sum10 = vmlaq_f32(_sum10, _k11, _r21);
_sum10 = vmlaq_f32(_sum10, _k12, _r22);
_sum10 = vmlaq_f32(_sum10, _k13, _r23);
_sum10 = vmlaq_f32(_sum10, _k14, _r24);
_sum11 = vmlaq_f32(_sum11, _k10, _r21);
_sum11 = vmlaq_f32(_sum11, _k11, _r22);
_sum11 = vmlaq_f32(_sum11, _k12, _r23);
_sum11 = vmlaq_f32(_sum11, _k13, _r24);
_sum11 = vmlaq_f32(_sum11, _k14, _r25);
_sum00 = vmlaq_f32(_sum00, _k20, _r20);
_sum00 = vmlaq_f32(_sum00, _k21, _r21);
_sum00 = vmlaq_f32(_sum00, _k22, _r22);
_sum00 = vmlaq_f32(_sum00, _k23, _r23);
_sum00 = vmlaq_f32(_sum00, _k24, _r24);
_sum01 = vmlaq_f32(_sum01, _k20, _r21);
_sum01 = vmlaq_f32(_sum01, _k21, _r22);
_sum01 = vmlaq_f32(_sum01, _k22, _r23);
_sum01 = vmlaq_f32(_sum01, _k23, _r24);
_sum01 = vmlaq_f32(_sum01, _k24, _r25);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum10 = vmlaq_f32(_sum10, _k20, _r30);
_sum10 = vmlaq_f32(_sum10, _k21, _r31);
_sum10 = vmlaq_f32(_sum10, _k22, _r32);
_sum10 = vmlaq_f32(_sum10, _k23, _r33);
_sum10 = vmlaq_f32(_sum10, _k24, _r34);
_sum11 = vmlaq_f32(_sum11, _k20, _r31);
_sum11 = vmlaq_f32(_sum11, _k21, _r32);
_sum11 = vmlaq_f32(_sum11, _k22, _r33);
_sum11 = vmlaq_f32(_sum11, _k23, _r34);
_sum11 = vmlaq_f32(_sum11, _k24, _r35);
_sum00 = vmlaq_f32(_sum00, _k30, _r30);
_sum00 = vmlaq_f32(_sum00, _k31, _r31);
_sum00 = vmlaq_f32(_sum00, _k32, _r32);
_sum00 = vmlaq_f32(_sum00, _k33, _r33);
_sum00 = vmlaq_f32(_sum00, _k34, _r34);
_sum01 = vmlaq_f32(_sum01, _k30, _r31);
_sum01 = vmlaq_f32(_sum01, _k31, _r32);
_sum01 = vmlaq_f32(_sum01, _k32, _r33);
_sum01 = vmlaq_f32(_sum01, _k33, _r34);
_sum01 = vmlaq_f32(_sum01, _k34, _r35);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum10 = vmlaq_f32(_sum10, _k30, _r40);
_sum10 = vmlaq_f32(_sum10, _k31, _r41);
_sum10 = vmlaq_f32(_sum10, _k32, _r42);
_sum10 = vmlaq_f32(_sum10, _k33, _r43);
_sum10 = vmlaq_f32(_sum10, _k34, _r44);
_sum11 = vmlaq_f32(_sum11, _k30, _r41);
_sum11 = vmlaq_f32(_sum11, _k31, _r42);
_sum11 = vmlaq_f32(_sum11, _k32, _r43);
_sum11 = vmlaq_f32(_sum11, _k33, _r44);
_sum11 = vmlaq_f32(_sum11, _k34, _r45);
_sum00 = vmlaq_f32(_sum00, _k40, _r40);
_sum00 = vmlaq_f32(_sum00, _k41, _r41);
_sum00 = vmlaq_f32(_sum00, _k42, _r42);
_sum00 = vmlaq_f32(_sum00, _k43, _r43);
_sum00 = vmlaq_f32(_sum00, _k44, _r44);
_sum01 = vmlaq_f32(_sum01, _k40, _r41);
_sum01 = vmlaq_f32(_sum01, _k41, _r42);
_sum01 = vmlaq_f32(_sum01, _k42, _r43);
_sum01 = vmlaq_f32(_sum01, _k43, _r44);
_sum01 = vmlaq_f32(_sum01, _k44, _r45);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5+4);
float32x4_t _r52 = vld1q_f32(r5+8);
float32x4_t _r53 = vld1q_f32(r5+12);
float32x4_t _r54 = vld1q_f32(r5+16);
float32x4_t _r55 = vld1q_f32(r5+20);
_sum10 = vmlaq_f32(_sum10, _k40, _r50);
_sum10 = vmlaq_f32(_sum10, _k41, _r51);
_sum10 = vmlaq_f32(_sum10, _k42, _r52);
_sum10 = vmlaq_f32(_sum10, _k43, _r53);
_sum10 = vmlaq_f32(_sum10, _k44, _r54);
_sum11 = vmlaq_f32(_sum11, _k40, _r51);
_sum11 = vmlaq_f32(_sum11, _k41, _r52);
_sum11 = vmlaq_f32(_sum11, _k42, _r53);
_sum11 = vmlaq_f32(_sum11, _k43, _r54);
_sum11 = vmlaq_f32(_sum11, _k44, _r55);
vst1q_f32(outptr0, _sum00);
vst1q_f32(outptr0+4, _sum01);
vst1q_f32(outptr1, _sum10);
vst1q_f32(outptr1+4, _sum11);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
outptr0 += 8;
outptr1 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k00, _r10);
_sum1 = vmlaq_f32(_sum1, _k01, _r11);
_sum1 = vmlaq_f32(_sum1, _k02, _r12);
_sum1 = vmlaq_f32(_sum1, _k03, _r13);
_sum1 = vmlaq_f32(_sum1, _k04, _r14);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k10, _r20);
_sum1 = vmlaq_f32(_sum1, _k11, _r21);
_sum1 = vmlaq_f32(_sum1, _k12, _r22);
_sum1 = vmlaq_f32(_sum1, _k13, _r23);
_sum1 = vmlaq_f32(_sum1, _k14, _r24);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum1 = vmlaq_f32(_sum1, _k20, _r30);
_sum1 = vmlaq_f32(_sum1, _k21, _r31);
_sum1 = vmlaq_f32(_sum1, _k22, _r32);
_sum1 = vmlaq_f32(_sum1, _k23, _r33);
_sum1 = vmlaq_f32(_sum1, _k24, _r34);
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum1 = vmlaq_f32(_sum1, _k30, _r40);
_sum1 = vmlaq_f32(_sum1, _k31, _r41);
_sum1 = vmlaq_f32(_sum1, _k32, _r42);
_sum1 = vmlaq_f32(_sum1, _k33, _r43);
_sum1 = vmlaq_f32(_sum1, _k34, _r44);
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r51 = vld1q_f32(r5+4);
float32x4_t _r52 = vld1q_f32(r5+8);
float32x4_t _r53 = vld1q_f32(r5+12);
float32x4_t _r54 = vld1q_f32(r5+16);
_sum1 = vmlaq_f32(_sum1, _k40, _r50);
_sum1 = vmlaq_f32(_sum1, _k41, _r51);
_sum1 = vmlaq_f32(_sum1, _k42, _r52);
_sum1 = vmlaq_f32(_sum1, _k43, _r53);
_sum1 = vmlaq_f32(_sum1, _k44, _r54);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
outptr0 += 4;
outptr1 += 4;
}
r0 += 4*4 + w*4;
r1 += 4*4 + w*4;
r2 += 4*4 + w*4;
r3 += 4*4 + w*4;
r4 += 4*4 + w*4;
r5 += 4*4 + w*4;
outptr0 += outw*4;
outptr1 += outw*4;
}
#endif // __aarch64__
for (; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _r07 = vld1q_f32(r0+28);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r01);
_sum1 = vmlaq_f32(_sum1, _k01, _r02);
_sum1 = vmlaq_f32(_sum1, _k02, _r03);
_sum1 = vmlaq_f32(_sum1, _k03, _r04);
_sum1 = vmlaq_f32(_sum1, _k04, _r05);
_sum2 = vmlaq_f32(_sum2, _k00, _r02);
_sum2 = vmlaq_f32(_sum2, _k01, _r03);
_sum2 = vmlaq_f32(_sum2, _k02, _r04);
_sum2 = vmlaq_f32(_sum2, _k03, _r05);
_sum2 = vmlaq_f32(_sum2, _k04, _r06);
_sum3 = vmlaq_f32(_sum3, _k00, _r03);
_sum3 = vmlaq_f32(_sum3, _k01, _r04);
_sum3 = vmlaq_f32(_sum3, _k02, _r05);
_sum3 = vmlaq_f32(_sum3, _k03, _r06);
_sum3 = vmlaq_f32(_sum3, _k04, _r07);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _r17 = vld1q_f32(r1+28);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r11);
_sum1 = vmlaq_f32(_sum1, _k11, _r12);
_sum1 = vmlaq_f32(_sum1, _k12, _r13);
_sum1 = vmlaq_f32(_sum1, _k13, _r14);
_sum1 = vmlaq_f32(_sum1, _k14, _r15);
_sum2 = vmlaq_f32(_sum2, _k10, _r12);
_sum2 = vmlaq_f32(_sum2, _k11, _r13);
_sum2 = vmlaq_f32(_sum2, _k12, _r14);
_sum2 = vmlaq_f32(_sum2, _k13, _r15);
_sum2 = vmlaq_f32(_sum2, _k14, _r16);
_sum3 = vmlaq_f32(_sum3, _k10, _r13);
_sum3 = vmlaq_f32(_sum3, _k11, _r14);
_sum3 = vmlaq_f32(_sum3, _k12, _r15);
_sum3 = vmlaq_f32(_sum3, _k13, _r16);
_sum3 = vmlaq_f32(_sum3, _k14, _r17);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _r27 = vld1q_f32(r2+28);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r21);
_sum1 = vmlaq_f32(_sum1, _k21, _r22);
_sum1 = vmlaq_f32(_sum1, _k22, _r23);
_sum1 = vmlaq_f32(_sum1, _k23, _r24);
_sum1 = vmlaq_f32(_sum1, _k24, _r25);
_sum2 = vmlaq_f32(_sum2, _k20, _r22);
_sum2 = vmlaq_f32(_sum2, _k21, _r23);
_sum2 = vmlaq_f32(_sum2, _k22, _r24);
_sum2 = vmlaq_f32(_sum2, _k23, _r25);
_sum2 = vmlaq_f32(_sum2, _k24, _r26);
_sum3 = vmlaq_f32(_sum3, _k20, _r23);
_sum3 = vmlaq_f32(_sum3, _k21, _r24);
_sum3 = vmlaq_f32(_sum3, _k22, _r25);
_sum3 = vmlaq_f32(_sum3, _k23, _r26);
_sum3 = vmlaq_f32(_sum3, _k24, _r27);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _r37 = vld1q_f32(r3+28);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r31);
_sum1 = vmlaq_f32(_sum1, _k31, _r32);
_sum1 = vmlaq_f32(_sum1, _k32, _r33);
_sum1 = vmlaq_f32(_sum1, _k33, _r34);
_sum1 = vmlaq_f32(_sum1, _k34, _r35);
_sum2 = vmlaq_f32(_sum2, _k30, _r32);
_sum2 = vmlaq_f32(_sum2, _k31, _r33);
_sum2 = vmlaq_f32(_sum2, _k32, _r34);
_sum2 = vmlaq_f32(_sum2, _k33, _r35);
_sum2 = vmlaq_f32(_sum2, _k34, _r36);
_sum3 = vmlaq_f32(_sum3, _k30, _r33);
_sum3 = vmlaq_f32(_sum3, _k31, _r34);
_sum3 = vmlaq_f32(_sum3, _k32, _r35);
_sum3 = vmlaq_f32(_sum3, _k33, _r36);
_sum3 = vmlaq_f32(_sum3, _k34, _r37);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _r47 = vld1q_f32(r4+28);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r41);
_sum1 = vmlaq_f32(_sum1, _k41, _r42);
_sum1 = vmlaq_f32(_sum1, _k42, _r43);
_sum1 = vmlaq_f32(_sum1, _k43, _r44);
_sum1 = vmlaq_f32(_sum1, _k44, _r45);
_sum2 = vmlaq_f32(_sum2, _k40, _r42);
_sum2 = vmlaq_f32(_sum2, _k41, _r43);
_sum2 = vmlaq_f32(_sum2, _k42, _r44);
_sum2 = vmlaq_f32(_sum2, _k43, _r45);
_sum2 = vmlaq_f32(_sum2, _k44, _r46);
_sum3 = vmlaq_f32(_sum3, _k40, _r43);
_sum3 = vmlaq_f32(_sum3, _k41, _r44);
_sum3 = vmlaq_f32(_sum3, _k42, _r45);
_sum3 = vmlaq_f32(_sum3, _k43, _r46);
_sum3 = vmlaq_f32(_sum3, _k44, _r47);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
vst1q_f32(outptr0+8, _sum2);
vst1q_f32(outptr0+12, _sum3);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
r4 += 16;
outptr0 += 16;
}
for (; j+1 < outw; j+=2)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r01);
_sum1 = vmlaq_f32(_sum1, _k01, _r02);
_sum1 = vmlaq_f32(_sum1, _k02, _r03);
_sum1 = vmlaq_f32(_sum1, _k03, _r04);
_sum1 = vmlaq_f32(_sum1, _k04, _r05);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r11);
_sum1 = vmlaq_f32(_sum1, _k11, _r12);
_sum1 = vmlaq_f32(_sum1, _k12, _r13);
_sum1 = vmlaq_f32(_sum1, _k13, _r14);
_sum1 = vmlaq_f32(_sum1, _k14, _r15);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r21);
_sum1 = vmlaq_f32(_sum1, _k21, _r22);
_sum1 = vmlaq_f32(_sum1, _k22, _r23);
_sum1 = vmlaq_f32(_sum1, _k23, _r24);
_sum1 = vmlaq_f32(_sum1, _k24, _r25);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r31);
_sum1 = vmlaq_f32(_sum1, _k31, _r32);
_sum1 = vmlaq_f32(_sum1, _k32, _r33);
_sum1 = vmlaq_f32(_sum1, _k33, _r34);
_sum1 = vmlaq_f32(_sum1, _k34, _r35);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r41);
_sum1 = vmlaq_f32(_sum1, _k41, _r42);
_sum1 = vmlaq_f32(_sum1, _k42, _r43);
_sum1 = vmlaq_f32(_sum1, _k43, _r44);
_sum1 = vmlaq_f32(_sum1, _k44, _r45);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
outptr0 += 8;
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
vst1q_f32(outptr0, _sum0);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
outptr0 += 4;
}
r0 += 4*4;
r1 += 4*4;
r2 += 4*4;
r3 += 4*4;
r4 += 4*4;
}
}
}
// Depthwise 5x5 convolution, stride 2, for pack-4 layout (4 channels interleaved
// per float32x4_t lane group). Each group g convolves one 4-channel slice of
// bottom_blob with its own 5x5 kernel (stored as 25 taps x 4 lanes = 100 floats
// per group) and writes one 4-channel slice of top_blob, adding the per-group
// bias when _bias is non-empty. Input is assumed pre-padded so rows r0..r4 can
// be read directly (no border handling here).
static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// After a row of outputs, the row pointers sit at 2*outw input columns;
// stride 2 also skips one full input row, hence (w - 2*outw + w) packs of 4 floats.
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
// One output channel group per task; groups are independent.
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
// Broadcast zero when no bias was supplied.
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
// Five consecutive input rows feed one output row (5x5 window).
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// Main path: 4 output positions per iteration. Stride 2 means the four
// windows start at input packs 0,2,4,6, so 11 packs (44 floats) of each
// row are loaded and reused across _sum0.._sum3.
for (; j+3 < outw; j+=4)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _sum2 = _bias0;
float32x4_t _sum3 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _r07 = vld1q_f32(r0+28);
float32x4_t _r08 = vld1q_f32(r0+32);
float32x4_t _r09 = vld1q_f32(r0+36);
float32x4_t _r010 = vld1q_f32(r0+40);
// Kernel row 0: 5 taps x 4 lanes = 20 floats.
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
// _sum1's window starts 2 packs (stride 2) after _sum0's, etc.
_sum1 = vmlaq_f32(_sum1, _k00, _r02);
_sum1 = vmlaq_f32(_sum1, _k01, _r03);
_sum1 = vmlaq_f32(_sum1, _k02, _r04);
_sum1 = vmlaq_f32(_sum1, _k03, _r05);
_sum1 = vmlaq_f32(_sum1, _k04, _r06);
_sum2 = vmlaq_f32(_sum2, _k00, _r04);
_sum2 = vmlaq_f32(_sum2, _k01, _r05);
_sum2 = vmlaq_f32(_sum2, _k02, _r06);
_sum2 = vmlaq_f32(_sum2, _k03, _r07);
_sum2 = vmlaq_f32(_sum2, _k04, _r08);
_sum3 = vmlaq_f32(_sum3, _k00, _r06);
_sum3 = vmlaq_f32(_sum3, _k01, _r07);
_sum3 = vmlaq_f32(_sum3, _k02, _r08);
_sum3 = vmlaq_f32(_sum3, _k03, _r09);
_sum3 = vmlaq_f32(_sum3, _k04, _r010);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _r17 = vld1q_f32(r1+28);
float32x4_t _r18 = vld1q_f32(r1+32);
float32x4_t _r19 = vld1q_f32(r1+36);
float32x4_t _r110 = vld1q_f32(r1+40);
// Kernel row 1.
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r12);
_sum1 = vmlaq_f32(_sum1, _k11, _r13);
_sum1 = vmlaq_f32(_sum1, _k12, _r14);
_sum1 = vmlaq_f32(_sum1, _k13, _r15);
_sum1 = vmlaq_f32(_sum1, _k14, _r16);
_sum2 = vmlaq_f32(_sum2, _k10, _r14);
_sum2 = vmlaq_f32(_sum2, _k11, _r15);
_sum2 = vmlaq_f32(_sum2, _k12, _r16);
_sum2 = vmlaq_f32(_sum2, _k13, _r17);
_sum2 = vmlaq_f32(_sum2, _k14, _r18);
_sum3 = vmlaq_f32(_sum3, _k10, _r16);
_sum3 = vmlaq_f32(_sum3, _k11, _r17);
_sum3 = vmlaq_f32(_sum3, _k12, _r18);
_sum3 = vmlaq_f32(_sum3, _k13, _r19);
_sum3 = vmlaq_f32(_sum3, _k14, _r110);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _r27 = vld1q_f32(r2+28);
float32x4_t _r28 = vld1q_f32(r2+32);
float32x4_t _r29 = vld1q_f32(r2+36);
float32x4_t _r210 = vld1q_f32(r2+40);
// Kernel row 2.
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r22);
_sum1 = vmlaq_f32(_sum1, _k21, _r23);
_sum1 = vmlaq_f32(_sum1, _k22, _r24);
_sum1 = vmlaq_f32(_sum1, _k23, _r25);
_sum1 = vmlaq_f32(_sum1, _k24, _r26);
_sum2 = vmlaq_f32(_sum2, _k20, _r24);
_sum2 = vmlaq_f32(_sum2, _k21, _r25);
_sum2 = vmlaq_f32(_sum2, _k22, _r26);
_sum2 = vmlaq_f32(_sum2, _k23, _r27);
_sum2 = vmlaq_f32(_sum2, _k24, _r28);
_sum3 = vmlaq_f32(_sum3, _k20, _r26);
_sum3 = vmlaq_f32(_sum3, _k21, _r27);
_sum3 = vmlaq_f32(_sum3, _k22, _r28);
_sum3 = vmlaq_f32(_sum3, _k23, _r29);
_sum3 = vmlaq_f32(_sum3, _k24, _r210);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _r37 = vld1q_f32(r3+28);
float32x4_t _r38 = vld1q_f32(r3+32);
float32x4_t _r39 = vld1q_f32(r3+36);
float32x4_t _r310 = vld1q_f32(r3+40);
// Kernel row 3.
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r32);
_sum1 = vmlaq_f32(_sum1, _k31, _r33);
_sum1 = vmlaq_f32(_sum1, _k32, _r34);
_sum1 = vmlaq_f32(_sum1, _k33, _r35);
_sum1 = vmlaq_f32(_sum1, _k34, _r36);
_sum2 = vmlaq_f32(_sum2, _k30, _r34);
_sum2 = vmlaq_f32(_sum2, _k31, _r35);
_sum2 = vmlaq_f32(_sum2, _k32, _r36);
_sum2 = vmlaq_f32(_sum2, _k33, _r37);
_sum2 = vmlaq_f32(_sum2, _k34, _r38);
_sum3 = vmlaq_f32(_sum3, _k30, _r36);
_sum3 = vmlaq_f32(_sum3, _k31, _r37);
_sum3 = vmlaq_f32(_sum3, _k32, _r38);
_sum3 = vmlaq_f32(_sum3, _k33, _r39);
_sum3 = vmlaq_f32(_sum3, _k34, _r310);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _r47 = vld1q_f32(r4+28);
float32x4_t _r48 = vld1q_f32(r4+32);
float32x4_t _r49 = vld1q_f32(r4+36);
float32x4_t _r410 = vld1q_f32(r4+40);
// Kernel row 4 (last); rewind k0 by 4 rows so the next output
// position starts again at kernel row 0.
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r42);
_sum1 = vmlaq_f32(_sum1, _k41, _r43);
_sum1 = vmlaq_f32(_sum1, _k42, _r44);
_sum1 = vmlaq_f32(_sum1, _k43, _r45);
_sum1 = vmlaq_f32(_sum1, _k44, _r46);
_sum2 = vmlaq_f32(_sum2, _k40, _r44);
_sum2 = vmlaq_f32(_sum2, _k41, _r45);
_sum2 = vmlaq_f32(_sum2, _k42, _r46);
_sum2 = vmlaq_f32(_sum2, _k43, _r47);
_sum2 = vmlaq_f32(_sum2, _k44, _r48);
_sum3 = vmlaq_f32(_sum3, _k40, _r46);
_sum3 = vmlaq_f32(_sum3, _k41, _r47);
_sum3 = vmlaq_f32(_sum3, _k42, _r48);
_sum3 = vmlaq_f32(_sum3, _k43, _r49);
_sum3 = vmlaq_f32(_sum3, _k44, _r410);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
vst1q_f32(outptr0+8, _sum2);
vst1q_f32(outptr0+12, _sum3);
// 4 outputs consumed 8 input packs (stride 2) per row.
r0 += 8*4;
r1 += 8*4;
r2 += 8*4;
r3 += 8*4;
r4 += 8*4;
outptr0 += 16;
}
// 2 output positions per iteration: 7 packs of each row are loaded.
for (; j+1 < outw; j+=2)
{
float32x4_t _sum0 = _bias0;
float32x4_t _sum1 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _r05 = vld1q_f32(r0+20);
float32x4_t _r06 = vld1q_f32(r0+24);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
_sum1 = vmlaq_f32(_sum1, _k00, _r02);
_sum1 = vmlaq_f32(_sum1, _k01, _r03);
_sum1 = vmlaq_f32(_sum1, _k02, _r04);
_sum1 = vmlaq_f32(_sum1, _k03, _r05);
_sum1 = vmlaq_f32(_sum1, _k04, _r06);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _r15 = vld1q_f32(r1+20);
float32x4_t _r16 = vld1q_f32(r1+24);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
_sum1 = vmlaq_f32(_sum1, _k10, _r12);
_sum1 = vmlaq_f32(_sum1, _k11, _r13);
_sum1 = vmlaq_f32(_sum1, _k12, _r14);
_sum1 = vmlaq_f32(_sum1, _k13, _r15);
_sum1 = vmlaq_f32(_sum1, _k14, _r16);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _r25 = vld1q_f32(r2+20);
float32x4_t _r26 = vld1q_f32(r2+24);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
_sum1 = vmlaq_f32(_sum1, _k20, _r22);
_sum1 = vmlaq_f32(_sum1, _k21, _r23);
_sum1 = vmlaq_f32(_sum1, _k22, _r24);
_sum1 = vmlaq_f32(_sum1, _k23, _r25);
_sum1 = vmlaq_f32(_sum1, _k24, _r26);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _r35 = vld1q_f32(r3+20);
float32x4_t _r36 = vld1q_f32(r3+24);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
_sum1 = vmlaq_f32(_sum1, _k30, _r32);
_sum1 = vmlaq_f32(_sum1, _k31, _r33);
_sum1 = vmlaq_f32(_sum1, _k32, _r34);
_sum1 = vmlaq_f32(_sum1, _k33, _r35);
_sum1 = vmlaq_f32(_sum1, _k34, _r36);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _r45 = vld1q_f32(r4+20);
float32x4_t _r46 = vld1q_f32(r4+24);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
// Rewind kernel pointer to row 0 for the next output position.
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
_sum1 = vmlaq_f32(_sum1, _k40, _r42);
_sum1 = vmlaq_f32(_sum1, _k41, _r43);
_sum1 = vmlaq_f32(_sum1, _k42, _r44);
_sum1 = vmlaq_f32(_sum1, _k43, _r45);
_sum1 = vmlaq_f32(_sum1, _k44, _r46);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr0+4, _sum1);
// 2 outputs consumed 4 input packs per row.
r0 += 4*4;
r1 += 4*4;
r2 += 4*4;
r3 += 4*4;
r4 += 4*4;
outptr0 += 8;
}
// Scalar-column tail: one output position per iteration.
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0+4);
float32x4_t _r02 = vld1q_f32(r0+8);
float32x4_t _r03 = vld1q_f32(r0+12);
float32x4_t _r04 = vld1q_f32(r0+16);
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k01 = vld1q_f32(k0+4);
float32x4_t _k02 = vld1q_f32(k0+8);
float32x4_t _k03 = vld1q_f32(k0+12);
float32x4_t _k04 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k03, _r03);
_sum0 = vmlaq_f32(_sum0, _k04, _r04);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1+4);
float32x4_t _r12 = vld1q_f32(r1+8);
float32x4_t _r13 = vld1q_f32(r1+12);
float32x4_t _r14 = vld1q_f32(r1+16);
float32x4_t _k10 = vld1q_f32(k0);
float32x4_t _k11 = vld1q_f32(k0+4);
float32x4_t _k12 = vld1q_f32(k0+8);
float32x4_t _k13 = vld1q_f32(k0+12);
float32x4_t _k14 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k13, _r13);
_sum0 = vmlaq_f32(_sum0, _k14, _r14);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2+4);
float32x4_t _r22 = vld1q_f32(r2+8);
float32x4_t _r23 = vld1q_f32(r2+12);
float32x4_t _r24 = vld1q_f32(r2+16);
float32x4_t _k20 = vld1q_f32(k0);
float32x4_t _k21 = vld1q_f32(k0+4);
float32x4_t _k22 = vld1q_f32(k0+8);
float32x4_t _k23 = vld1q_f32(k0+12);
float32x4_t _k24 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
_sum0 = vmlaq_f32(_sum0, _k23, _r23);
_sum0 = vmlaq_f32(_sum0, _k24, _r24);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r31 = vld1q_f32(r3+4);
float32x4_t _r32 = vld1q_f32(r3+8);
float32x4_t _r33 = vld1q_f32(r3+12);
float32x4_t _r34 = vld1q_f32(r3+16);
float32x4_t _k30 = vld1q_f32(k0);
float32x4_t _k31 = vld1q_f32(k0+4);
float32x4_t _k32 = vld1q_f32(k0+8);
float32x4_t _k33 = vld1q_f32(k0+12);
float32x4_t _k34 = vld1q_f32(k0+16);
k0 += 20;
_sum0 = vmlaq_f32(_sum0, _k30, _r30);
_sum0 = vmlaq_f32(_sum0, _k31, _r31);
_sum0 = vmlaq_f32(_sum0, _k32, _r32);
_sum0 = vmlaq_f32(_sum0, _k33, _r33);
_sum0 = vmlaq_f32(_sum0, _k34, _r34);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r41 = vld1q_f32(r4+4);
float32x4_t _r42 = vld1q_f32(r4+8);
float32x4_t _r43 = vld1q_f32(r4+12);
float32x4_t _r44 = vld1q_f32(r4+16);
float32x4_t _k40 = vld1q_f32(k0);
float32x4_t _k41 = vld1q_f32(k0+4);
float32x4_t _k42 = vld1q_f32(k0+8);
float32x4_t _k43 = vld1q_f32(k0+12);
float32x4_t _k44 = vld1q_f32(k0+16);
// Rewind kernel pointer to row 0 for the next output position.
k0 -= 80;
_sum0 = vmlaq_f32(_sum0, _k40, _r40);
_sum0 = vmlaq_f32(_sum0, _k41, _r41);
_sum0 = vmlaq_f32(_sum0, _k42, _r42);
_sum0 = vmlaq_f32(_sum0, _k43, _r43);
_sum0 = vmlaq_f32(_sum0, _k44, _r44);
vst1q_f32(outptr0, _sum0);
// 1 output consumed 2 input packs per row (stride 2).
r0 += 2*4;
r1 += 2*4;
r2 += 2*4;
r3 += 2*4;
r4 += 2*4;
outptr0 += 4;
}
// Advance all five row pointers past the row remainder plus one
// skipped input row (stride 2 vertically).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
nanoBragg_standalone.c | /* perfect-lattice nanocrystal diffraction simulator -James Holton and Ken Frankel 2-3-18
example:
gcc -O3 -o nanoBragg nanoBragg.c -lm -fopenmp
./nanoBragg -mat auto.mat -hkl P1.hkl -distance 2500
./nanoBragg -mat A.mat -hkl P1.hkl -lambda 1 -dispersion 0.1 -dispstep 3 -distance 100 -detsize 100 -pixel 0.1 \
-hdiv 0.28 -hdivstep 0.02 -vdiv 0.28 -vdivstep 0.02 \
-fluence 1e24 -N 0 \
-water 0
./nanoBragg -cell 74 74 36 90 90 90 -misset 10 20 30 \
-hkl P1.hkl -lambda 1 -dispersion 0.1 -dispstep 3 -distance 100 -detsize 100 -pixel 0.1 \
-hdiv 0.28 -hdivstep 0.02 -vdiv 0.28 -vdivstep 0.02 \
-fluence 1e24 -N 0 \
-water 0
lattice positions and wavelength (lambda) should be provided in Angstrom, three numbers per line
detector distance, detsize and pixel size in mm
divergence in mrad
dispersion in percent
phi and osc are in degrees
fluence is in photons/meter^2 (integrated exposure time)
Na, Nb, Nc, are the number of unit cells along the a,b,c axes, respectively
note that any of Na,Nb,Nc can be zero to simulate an isolated unit cell (SAXS)
water is the thickness in microns of "water" also traversed by the beam
this generates a simplistic background: that from a material with density 1.0 and isotropic
structure factor of 2.57 electrons (the forward-scattered structure factor of water)
more complicated backgrounds can be made in a separate run of this program using Na=Nb=Nc=0.
auto.mat can be an orientation matrix from MOSFLM, or simply a text file of the
three reciprocal lattice vector components along x,y,z:
a_star_x b_star_x c_star_x
a_star_y b_star_y c_star_y
a_star_z b_star_z c_star_z
you can also simply specify the unit cell with -cell and some mis-setting angles with -misset
P1.hkl should be a text file containing
h k l F
for EVERY spot that has an intensity (including F000). No symmetry operators will
be imposed by this program. Not even Friedel symmetry.
Since reading the HKL file can often be the slowest step, this program will create
a binary "dumpfile" in the current working directory that it will re-read upon
subsequent runs if -hkl is not specified.
Please note that unlike nearBragg, this program does not work in the near field,
so detector distances should always be much larger than the crystal size
*/
#define _USE_MATH_DEFINES
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <limits.h>
#include <float.h>
#ifndef NAN
#define NAN strtod("NAN",NULL)
#endif
#define TRUE 1
#define FALSE 0
#define Avogadro 6.02214179e23
/* read in text file into double arrays at provided addresses */
size_t read_text_file(char *filename, size_t nargs, ... );
/* cubic spline interpolation functions */
void polint(double *xa, double *ya, double x, double *y);
void polin2(double *x1a, double *x2a, double **ya, double x1,double x2, double *y);
void polin3(double *x1a, double *x2a, double *x3a, double ***ya, double x1,double x2, double x3, double *y);
/* rotate a 3-vector in space applied in order phix,phiy,phiz*/
double *rotate(double *v, double *newv, double phix, double phiy, double phiz);
/* rotate a 3-vector about a unit vector axis */
double *rotate_axis(double *v, double *newv, double *axis, double phi);
/* rotate a 3-vector using a 9-element unitary matrix */
double *rotate_umat(double *v, double *newv, double *umat);
/* vector cross product where vector magnitude is 0th element */
double *cross_product(double *x, double *y, double *z);
/* vector inner product where vector magnitude is 0th element */
double dot_product(double *x, double *y);
/* compute difference between two vectors */
double vector_diff(double *vector, double *origin_vector, double *new_vector);
/* measure magnitude of vector and put it in 0th element */
double magnitude(double *vector);
/* scale the magnitude of a vector */
double vector_scale(double *vector, double *new_vector, double scale);
/* force the magnitude of vector to given value */
double vector_rescale(double *vector, double *new_vector, double magnitude);
/* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */
double unitize(double *vector, double *new_unit_vector);
/* polarization factor from vectors */
double polarization_factor(double kahn_factor, double *incident, double *diffracted, double *axis);
/* generate unit vector in random direction */
float uniform3Ddev(float *dx, float *dy, float *dz, long *idum);
/* generate random unitary rotation matrix within a spherical cap */
double *mosaic_rotation_umat(float mosaicity, double umat[9], long *idum);
/* convert unitary matrix into missetting angles */
double *umat2misset(double umat[9],double *missets);
/* random deviate with Poisson distribution */
float poidev(float xm, long *idum);
/* random deviate with Gaussian distribution */
float gaussdev(long *idum);
/* random deviate with Lorentzian distribution */
float lorentzdev(long *idum);
/* random deviate with triangle-shaped distribution */
float triangledev(long *idum);
/* random deviate with exponential distribution (>0) */
float expdev(long *idum);
/* random deviate with uniform distribution */
float ran1(long *idum);
/* Fourier transform of a truncated lattice */
double sincg(double x, double N);
/* Fourier transform of a sphere */
double sinc3(double x);
/* Fourier transform of a spherically-truncated lattice */
double sinc_conv_sinc3(double x);
/* file stuff */
/* input file names: NULL until set by the corresponding command-line option */
char *matfilename = NULL;               /* -mat: orientation matrix / reciprocal cell vectors */
char *hklfilename = NULL;               /* -hkl: text file of h k l F structure factors */
char *dumpfilename = "Fdump.bin\0";     /* binary cache of Fhkl, re-read when -hkl is absent */
char *stolfilename = NULL;              /* -stol/-4stol/-Q: background structure-factor table */
char *imginfilename = NULL;             /* -img: SMV image used mostly for its header values */
char *maskfilename = NULL;              /* -mask: SMV image marking pixels to process */
/* output file names: defaults below, overridable on the command line */
char *stoloutfilename = "output.stol\0";
char *sourcefilename = NULL;            /* -sourcefile: x-ray source position/intensity list */
char *floatfilename = "floatimage.bin\0";   /* -floatfile: raw float pixel values */
//char *sinfilename = "sinimage.bin\0";
//char *cosfilename = "cosimage.bin\0";
char *intfilename = "intimage.img\0";       /* -intfile: SMV-format integer image */
char *pgmfilename = "image.pgm\0";          /* -pgmfile: 8-bit preview image */
char *noisefilename = "noiseimage.img\0";   /* -noisefile: image with photon-counting noise */
/* file handles shared across the program */
FILE *infile = NULL;
FILE *Fdumpfile = NULL;
FILE *outfile = NULL;
FILE *stoloutfile = NULL;
/* which point is held fixed when the detector is rotated (see -pivot, -twotheta) */
typedef enum { SAMPLE, BEAM } pivot;
/* crystal shape model: selects the lattice-transform function used for spot profiles */
typedef enum { SQUARE, ROUND, GAUSS, TOPHAT } shapetype;
/* beam-center and axis conventions of various processing programs (see -mosflm, -xds, ...) */
typedef enum { CUSTOM, ADXV, MOSFLM, XDS, DIALS, DENZO } convention;
/* frame handling routines */
/* everything needed to access one SMV-format image file */
typedef struct _SMVinfo
{
    char *filename;                  /* path the frame was opened from */
    FILE *handle;                    /* open stream for the file */
    int swap_bytes;                  /* nonzero if byte order must be swapped — presumably file vs host endianness; confirm in GetFrame */
    int header_size;                 /* bytes of ASCII header before the pixel data */
    int width;                       /* fast-axis dimension in pixels */
    int height;                      /* slow-axis dimension in pixels */
    char *header;                    /* the ASCII header text, queried via ValueOf() */
    unsigned short int *mmapdata;    /* 16-bit pixel data; name suggests memory-mapped — verify in GetFrame */
} SMVinfo;
/* SMV image handling routines */
SMVinfo GetFrame(char *filename);
double ValueOf( const char *keyword, SMVinfo smvfile);
char *get_byte_order();
unsigned char *read_pgm5_bytes(char *filename,unsigned int *returned_width,unsigned int *returned_height);
int main(int argc, char** argv)
{
/* progress meter stuff */
long progress_pixel,progress_pixels;
int progress_meter=1;
int babble=1;
int printout = 0;
int printout_spixel=-1,printout_fpixel=-1;
/* x-ray beam properties */
double beam_vector[4] = {0,1,0,0};
int coherent = 0;
int far_source = 1;
int round_div = 1;
double lambda,*lambda_of;
double mosaic_spread=-1.0,*mosaic_umats,mosaic_missets[4];
double umat[9];
double dispersion=0.0,dispstep=-1,lambda0 = 1.0e-10;
double hdiv,hdivstep=-1.0,hdivrange= -1.0;
double vdiv,vdivstep=-1.0,vdivrange= -1.0;
double source_path,source_distance = 10.0;
int divsteps=-1,hdivsteps=-1,vdivsteps=-1,dispsteps=-1;
int hdiv_tic,vdiv_tic,disp_tic,mos_tic;
int mosaic_domains=-1;
double weight;
int source,sources;
double *source_X,*source_Y,*source_Z,*source_I,*source_lambda;
/* Thomson cross section (m^2) */
double r_e_sqr = 7.94079248018965e-30;
/* incident x-ray fluence in photons/m^2 */
double fluence = 125932015286227086360700780544.0;
double flux=0.0,exposure=1.0,beamsize=1e-4;
/* sample size stuff */
int N=1;
double Na=1.0,Nb=1.0,Nc=1.0;
double xtalsize_max,xtalsize_a,xtalsize_b,xtalsize_c;
double reciprocal_pixel_size;
shapetype xtal_shape = SQUARE;
double hrad_sqr,fudge=1;
double sample_x = 0; /* m */
double sample_y = 0; /* m */
double sample_z = 0; /* m */
double density = 1.0e6; /* g/m^3 */
double molecular_weight = 18.0; /* g/mol */
double volume=0.0,molecules = 0.0;
/* scale factor = F^2*r_e_sqr*fluence*Avogadro*volume*density/molecular_weight
m^2 ph/m^2 /mol m^3 g/m^3 g/mol */
double water_size = 0.0;
double water_F = 2.57;
double water_MW = 18.0;
/* water F = 2.57 in forward direction */
/* detector stuff */
double pixel_size = 0.1e-3;
double pixel_pos[4];
int fpixel,spixel,fpixels=0,spixels=0,pixels;
double distance = 100.0e-3;
double detsize_f = 102.4e-3;
double detsize_s = 102.4e-3;
double detector_mu=0.0,detector_thick=0.0,detector_thickstep=-1.0,parallax,capture_fraction;
int detector_thicksteps=-1,thick_tic;
double fdet_vector[4] = {0,0,0,1};
double sdet_vector[4] = {0,0,-1,0};
double odet_vector[4] = {0,1,0,0};
double pix0_vector[4] = {0,0,0,0};
double detector_rotx=0.0,detector_roty=0.0,detector_rotz=0.0;
double twotheta_axis[4] = {0,0,1,0};
pivot detector_pivot = BEAM;
convention beam_convention = MOSFLM;
double detector_twotheta = 0.0;
double airpath,omega_pixel,omega_Rsqr_pixel,omega_sum;
int curved_detector = 0;
int point_pixel= 0;
/* beam center value that goes into the image header */
double Xbeam=NAN,Ybeam=NAN;
/* direct beam coordinate on fast/slow pixel axes; used for diffraction if pivot=beam */
double Fbeam=NAN,Sbeam=NAN;
double Fdet,Sdet,Odet;
double Fdet0,Sdet0;
/* nearest point on detector for detector at rotations=0 */
double Xclose=NAN,Yclose=NAN,close_distance=NAN;
/* near point in fast/slow pixel units; used for diffraction if pivot=sample */
double Fclose=NAN,Sclose=NAN;
/* fast/slow near-point position in pixels */
double ORGX=NAN,ORGY=NAN;
/* similar to pix0,vector but with dials-default vectors */
double dials_origin[4] = {0,0,0,0};
double adc_offset = 40.0;
/* scattering vectors */
double incident[4];
double diffracted[4],diffracted0[4];
double scattering[4];
double stol,twotheta,theta;
/* diffraction geometry stuff */
double costwotheta,sintwotheta,psi=0;
double xd,yd,zd,xd0,yd0,zd0;
double Ewald[4],Ewald0[4],relp[4];
double dmin=0;
int integral_form = 0;
/* polarization stuff */
double polar_vector[4] = {0,0,0,1};
double vert_vector[4];
double polar=1.0,polarization=0.0;
int nopolar = 0;
/* sampling */
int steps;
int roi_xmin=-1,roi_xmax=-1,roi_ymin=-1,roi_ymax=-1;
int oversample = -1,recommended_oversample,subS,subF;
int oversample_thick = 0;
int oversample_polar = 0;
int oversample_omega = 0;
double subpixel_size;
/* spindle */
double phi,phi0=0.0,phistep=-1.0,osc=-1.0;
int phi_tic,phisteps=-1;
double spindle_vector[4] = {0,0,0,1};
/* structure factor representation */
double phase,Fa,Fb;
double F,F_bg,*stol_of,*F_of;
double ***Fhkl;
double default_F=0.0;
int hkls=0;
double F_latt,F_cell;
double F_highangle,F_lowangle;
int stols,nearest=0;
double stol_file_mult=1.0e10;
double denom;
/* intensity stats */
double I,I_bg;
double max_I = 0.0;
double max_I_x = 0.0,max_I_y = 0.0;
double intfile_scale = 0.0;
double pgm_scale = 0.0;
double sum,sumsqr,avg,rms,rmsd;
int sumn = 0;
int overloads = 0;
/* image file data */
float *floatimage;
int imgidx;
SMVinfo maskfile;
unsigned short int *maskimage = NULL;
// float *sinimage;
// float *cosimage;
unsigned short int *intimage = NULL;
unsigned char *pgmimage = NULL;
char *byte_order = get_byte_order();
SMVinfo imginfile;
float *imginfileimage = NULL;
/* misc variables */
int i,j,n;
double X,Y,Z;
double ratio,r;
double X0,Y0,Z0,d_r;
double RTD=180.0*M_1_PI;
double test;
double vector[4];
double newvector[4];
long seed;
seed = -time((time_t *)0);
// printf("random number seed = %u\n",seed);
long mosaic_seed = -12345678;
/* interpolation arrays */
int interpolate = 2;
double ***sub_Fhkl;
int h_interp[5],k_interp[5],l_interp[5];
double h_interp_d[5],k_interp_d[5],l_interp_d[5];
double h,k,l;
int h0,k0,l0,h_range,k_range,l_range,h_min,h_max,k_min,k_max,l_min,l_max;
int h0_flr,k0_flr,l0_flr;
int i1=0, i2=0, i3=0;
/* unit cell stuff */
int user_cell = 0;
double a[4] = {0,0,0,0};
double b[4] = {0,0,0,0};
double c[4] = {0,0,0,0};
double a0[4],b0[4],c0[4];
double ap[4],bp[4],cp[4];
double alpha=0.0,beta=0.0,gamma=0.0;
double a_star[4],b_star[4],c_star[4];
double a_star0[4],b_star0[4],c_star0[4];
double alpha_star,beta_star,gamma_star;
double a_cross_b[4],b_cross_c[4],c_cross_a[4];
double a_star_cross_b_star[4],b_star_cross_c_star[4],c_star_cross_a_star[4];
double V_cell,V_star,skew,aavg;
double sin_alpha,sin_beta,sin_gamma;
double cos_alpha,cos_beta,cos_gamma;
double sin_alpha_star,sin_beta_star,sin_gamma_star;
double cos_alpha_star,cos_beta_star,cos_gamma_star;
double misset[4] = {0,0,0,0};
/* special options */
int calculate_noise = 1;
int write_pgm = 1;
/* check argument list */
for(i=1; i<argc; ++i)
{
if(argv[i][0] == '-')
{
/* option specified */
if(strstr(argv[i], "-img") && (argc > (i+1)))
{
imginfilename = argv[i+1];
}
if(strstr(argv[i], "-mask") && (argc > (i+1)))
{
maskfilename = argv[i+1];
}
}
}
/* read in any provided mask file */
if(maskfilename != NULL)
{
/* frame handling routines */
maskfile = GetFrame(maskfilename);
if(maskfile.header_size > 0) {
fpixels = maskfile.width;
spixels = maskfile.height;
pixels = fpixels*spixels;
test = ValueOf("PIXEL_SIZE",maskfile);
if(! isnan(test)) pixel_size = test/1000.0;
detsize_f = pixel_size*fpixels;
detsize_s = pixel_size*spixels;
test = ValueOf("DISTANCE",maskfile);
if(! isnan(test)) distance = test/1000.0;
test = ValueOf("CLOSE_DISTANCE",maskfile);
if(! isnan(test)) close_distance = test/1000.0;
test = ValueOf("WAVELENGTH",maskfile);
if(! isnan(test)) lambda0 = test/1e10;
test = ValueOf("BEAM_CENTER_X",maskfile);
if(! isnan(test)) Xbeam = test/1000.0;
test = ValueOf("BEAM_CENTER_Y",maskfile);
if(! isnan(test)) Ybeam = detsize_s - test/1000.0;
test = ValueOf("ORGX",maskfile);
if(! isnan(test)) ORGX = test;
test = ValueOf("ORGY",maskfile);
if(! isnan(test)) ORGY = test;
test = ValueOf("PHI",maskfile);
if(! isnan(test)) phi0 = test/RTD;
test = ValueOf("OSC_RANGE",maskfile);
if(! isnan(test)) osc = test/RTD;
test = ValueOf("TWOTHETA",maskfile);
if(! isnan(test)) twotheta = test/RTD;
maskimage = (unsigned short int*) calloc(pixels+10,sizeof(unsigned short int));
imgidx = maskfile.header_size / sizeof(unsigned short int);
for(i=0;i<pixels;++i){
maskimage[i] = (float) maskfile.mmapdata[imgidx];
++imgidx;
}
}
}
/* read in any provided img file (mostly for the header) */
if(imginfilename != NULL)
{
/* frame handling routines */
imginfile = GetFrame(imginfilename);
if(imginfile.header_size > 0) {
fpixels = imginfile.width;
spixels = imginfile.height;
pixels = fpixels*spixels;
test = ValueOf("PIXEL_SIZE",imginfile);
if(! isnan(test)) pixel_size = test/1000.0;
detsize_f = pixel_size*fpixels;
detsize_s = pixel_size*spixels;
test = ValueOf("DISTANCE",imginfile);
if(! isnan(test)) distance = test/1000.0;
test = ValueOf("CLOSE_DISTANCE",imginfile);
if(! isnan(test)) close_distance = test/1000.0;
test = ValueOf("WAVELENGTH",imginfile);
if(! isnan(test)) lambda0 = test/1e10;
test = ValueOf("BEAM_CENTER_X",imginfile);
if(! isnan(test)) Xbeam = test/1000.0;
test = ValueOf("BEAM_CENTER_Y",imginfile);
if(! isnan(test)) Ybeam = test/1000.0;
test = ValueOf("ORGX",imginfile);
if(! isnan(test)) ORGX = test;
test = ValueOf("ORGY",imginfile);
if(! isnan(test)) ORGY = test;
test = ValueOf("PHI",imginfile);
if(! isnan(test)) phi0 = test/RTD;
test = ValueOf("OSC_RANGE",imginfile);
if(! isnan(test)) osc = test/RTD;
test = ValueOf("TWOTHETA",imginfile);
if(! isnan(test)) twotheta = test/RTD;
imginfileimage = (float *) calloc(pixels+10,sizeof(float));
imgidx = imginfile.header_size / sizeof(unsigned short int);
for(i=0;i<pixels;++i){
imginfileimage[i] = (float) imginfile.mmapdata[imgidx];
++imgidx;
}
}
}
/* check argument list for options */
for(i=1; i<argc; ++i)
{
if(argv[i][0] == '-')
{
/* option specified */
if(strstr(argv[i], "-Na") && (argc > (i+1)))
{
Na = atoi(argv[i+1]);
continue;
}
if(strstr(argv[i], "-Nb") && (argc > (i+1)))
{
Nb = atoi(argv[i+1]);
continue;
}
if(strstr(argv[i], "-Nc") && (argc > (i+1)))
{
Nc = atoi(argv[i+1]);
continue;
}
if(0==strcmp(argv[i], "-N") && (argc > (i+1)))
{
Na = Nb = Nc = atoi(argv[i+1]);
continue;
}
if(strstr(argv[i], "-cell") && (argc > (i+1)))
{
user_cell = 1;
if(argc <= (i+1)) continue;
if(argv[i+1][0] == '-') continue;
a[0] = atof(argv[i+1]);
if(argc <= (i+2)) continue;
if(argv[i+2][0] == '-') continue;
b[0] = atof(argv[i+2]);
if(argc <= (i+3)) continue;
if(argv[i+3][0] == '-') continue;
c[0] = atof(argv[i+3]);
if(argc <= (i+4)) continue;
if(argv[i+4][0] == '-') continue;
alpha = atof(argv[i+4])/RTD;
if(argc <= (i+5)) continue;
if(argv[i+5][0] == '-') continue;
beta = atof(argv[i+5])/RTD;
if(argc <= (i+6)) continue;
if(argv[i+6][0] == '-') continue;
gamma = atof(argv[i+6])/RTD;
}
if(strstr(argv[i], "-misset") && (argc > (i+1)))
{
if(strstr(argv[i+1],"rand"))
{
misset[0] = -1;
continue;
}
}
if(strstr(argv[i], "-misset") && (argc > (i+3)))
{
misset[0] = 1;
misset[1] = atof(argv[i+1])/RTD;
misset[2] = atof(argv[i+2])/RTD;
misset[3] = atof(argv[i+3])/RTD;
}
if((strstr(argv[i], "-samplesize") || strstr(argv[i], "-sample_size")) && (argc > (i+1)))
{
sample_x = atof(argv[i+1])/1000;
sample_y = atof(argv[i+1])/1000;
sample_z = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-sample_thick") || strstr(argv[i], "-sample_x") ) && (argc > (i+1)))
{
sample_x = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-sample_width") || strstr(argv[i], "-sample_y") || strstr(argv[i], "-width")) && (argc > (i+1)))
{
sample_y = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-sample_heigh") || strstr(argv[i], "-sample_z") || strstr(argv[i], "-heigh")) && (argc > (i+1)))
{
sample_z = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-xtalsize") || strstr(argv[i], "-xtal_size")) && (argc > (i+1)))
{
sample_x = atof(argv[i+1])/1000;
sample_y = atof(argv[i+1])/1000;
sample_z = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-xtal_thick") || strstr(argv[i], "-xtal_x") ) && (argc > (i+1)))
{
sample_x = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-xtal_width") || strstr(argv[i], "-xtal_y") || strstr(argv[i], "-width")) && (argc > (i+1)))
{
sample_y = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-xtal_heigh") || strstr(argv[i], "-xtal_z") || strstr(argv[i], "-heigh")) && (argc > (i+1)))
{
sample_z = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-density") || strstr(argv[i], "-sample_den")) && (argc > (i+1)))
{
density = atof(argv[i+1])*1e6;
}
if((0==strcmp(argv[i], "-MW") || strstr(argv[i], "-molec")) && (argc > (i+1)))
{
molecular_weight = atof(argv[i+1]);
}
if(strstr(argv[i], "-Xbeam") && (argc > (i+1)))
{
Xbeam = atof(argv[i+1])/1000.0;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-Ybeam") && (argc > (i+1)))
{
Ybeam = atof(argv[i+1])/1000.0;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-Xclose") && (argc > (i+1)))
{
Xclose = atof(argv[i+1])/1000.0;
detector_pivot = SAMPLE;
}
if(strstr(argv[i], "-Yclose") && (argc > (i+1)))
{
Yclose = atof(argv[i+1])/1000.0;
detector_pivot = SAMPLE;
}
if(strstr(argv[i], "-ORGX") && (argc > (i+1)))
{
ORGX = atof(argv[i+1]);
detector_pivot = SAMPLE;
}
if(strstr(argv[i], "-ORGY") && (argc > (i+1)))
{
ORGY = atof(argv[i+1]);
detector_pivot = SAMPLE;
}
if(strstr(argv[i], "-pivot") && (argc > (i+1)))
{
if(strstr(argv[i+1], "sample")) detector_pivot = SAMPLE;
if(strstr(argv[i+1], "beam")) detector_pivot = BEAM;
}
if(strstr(argv[i], "-mosflm"))
{
beam_convention = MOSFLM;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-xds"))
{
beam_convention = XDS;
detector_pivot = SAMPLE;
}
if(strstr(argv[i], "-adxv"))
{
beam_convention = ADXV;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-denzo"))
{
beam_convention = DENZO;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-dials"))
{
beam_convention = DIALS;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-fdet_vector") && (argc > (i+3)))
{
beam_convention = CUSTOM;
fdet_vector[1] = atof(argv[i+1]);
fdet_vector[2] = atof(argv[i+2]);
fdet_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-sdet_vector") && (argc > (i+3)))
{
beam_convention = CUSTOM;
sdet_vector[1] = atof(argv[i+1]);
sdet_vector[2] = atof(argv[i+2]);
sdet_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-odet_vector") && (argc > (i+3)))
{
beam_convention = CUSTOM;
odet_vector[1] = atof(argv[i+1]);
odet_vector[2] = atof(argv[i+2]);
odet_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-beam_vector") && (argc > (i+3)))
{
beam_convention = CUSTOM;
beam_vector[1] = atof(argv[i+1]);
beam_vector[2] = atof(argv[i+2]);
beam_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-polar_vector") && (argc > (i+3)))
{
beam_convention = CUSTOM;
polar_vector[1] = atof(argv[i+1]);
polar_vector[2] = atof(argv[i+2]);
polar_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-spindle_axis") && (argc > (i+3)))
{
beam_convention = CUSTOM;
spindle_vector[1] = atof(argv[i+1]);
spindle_vector[2] = atof(argv[i+2]);
spindle_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-twotheta_axis") && (argc > (i+3)))
{
beam_convention = CUSTOM;
twotheta_axis[1] = atof(argv[i+1]);
twotheta_axis[2] = atof(argv[i+2]);
twotheta_axis[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-pix0_vector") && (argc > (i+3)))
{
beam_convention = CUSTOM;
pix0_vector[0] = 1.0;
pix0_vector[1] = atof(argv[i+1]);
pix0_vector[2] = atof(argv[i+2]);
pix0_vector[3] = atof(argv[i+3]);
}
if(strstr(argv[i], "-distance") && (argc > (i+1)))
{
distance = atof(argv[i+1])/1000.0;
detector_pivot = BEAM;
}
if(strstr(argv[i], "-close_distance") && (argc > (i+1)))
{
close_distance = atof(argv[i+1])/1000.0;
detector_pivot = SAMPLE;
}
// if(strstr(argv[i], "-source_dist") && (argc > (i+1)))
// {
// source_distance = atof(argv[i+1])/1000.0;
// }
if(strstr(argv[i], "-detector_abs") && (argc >= (i+1)))
{
if(strstr(argv[i+1], "inf") || atof(argv[i+1]) == 0.0) {
detector_thick = 0.0;
detector_mu = 0.0;
}else{
detector_mu = 1.0/(atof(argv[i+1])*1e-6);
}
}
if(strstr(argv[i], "-detector_thick") && (strlen(argv[i]) == 15) && (argc >= (i+1)))
{
detector_thick = atof(argv[i+1])*1e-6;
}
if(strstr(argv[i], "-detector_thicksteps") && (argc >= (i+1)))
{
detector_thicksteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-thicksteps") && (argc >= (i+1)))
{
detector_thicksteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-twotheta") && (argc > (i+1)))
{
detector_twotheta = atof(argv[i+1])/RTD;
detector_pivot = SAMPLE;
}
if(strstr(argv[i], "-detector_rotx") && (argc > (i+1)))
{
detector_rotx = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-detector_roty") && (argc > (i+1)))
{
detector_roty = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-detector_rotz") && (argc > (i+1)))
{
detector_rotz = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-detsize") && (strlen(argv[i]) == 8) && (argc > (i+1)))
{
detsize_f = atof(argv[i+1])/1000.0;
detsize_s = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-detsize_f") && (argc > (i+1)))
{
detsize_f = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-detsize_s") && (argc > (i+1)))
{
detsize_s = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-detpixels") && (strlen(argv[i]) == 10) && (argc > (i+1)))
{
fpixels = spixels = atoi(argv[i+1]);
}
if(strstr(argv[i], "-detpixels_f") && (argc > (i+1)))
{
fpixels = atoi(argv[i+1]);
}
if(strstr(argv[i], "-detpixels_s") && (argc > (i+1)))
{
spixels = atoi(argv[i+1]);
}
if(strstr(argv[i], "-curved_det") && (argc > (i+1)))
{
curved_detector = 1;
}
if(strstr(argv[i], "-pixel") && (argc > (i+1)))
{
pixel_size = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-point_pixel") )
{
point_pixel = 1;
}
if(strstr(argv[i], "-polar") && (strlen(argv[i]) == 6) && (argc > (i+1)))
{
polarization = atof(argv[i+1]);
nopolar = 0;
}
if(strstr(argv[i], "-nopolar") )
{
nopolar = 1;
}
if(strstr(argv[i], "-oversample_thick") )
{
oversample_thick = 1;
continue;
}
if(strstr(argv[i], "-oversample_polar") )
{
oversample_polar = 1;
continue;
}
if(strstr(argv[i], "-oversample_omega") )
{
oversample_omega = 1;
continue;
}
if(strstr(argv[i], "-oversample") && (argc > (i+1)))
{
oversample = atoi(argv[i+1]);
}
if(strstr(argv[i], "-roi") && (argc > (i+4)))
{
roi_xmin = atoi(argv[i+1]);
roi_xmax = atoi(argv[i+2]);
roi_ymin = atoi(argv[i+3]);
roi_ymax = atoi(argv[i+4]);
}
if((strstr(argv[i], "-lambda") || strstr(argv[i], "-wave")) && (argc > (i+1)))
{
lambda0 = atof(argv[i+1])/1.0e10;
}
if(strstr(argv[i], "-energy") && (argc > (i+1)))
{
lambda0 = (12398.42/atof(argv[i+1]))/1.0e10;
}
if(strstr(argv[i], "-fluence") && (argc > (i+1)))
{
fluence = atof(argv[i+1]);
}
if(strstr(argv[i], "-flux") && (argc > (i+1)))
{
flux = atof(argv[i+1]);
}
if(strstr(argv[i], "-exposure") && (argc > (i+1)))
{
exposure = atof(argv[i+1]);
}
if(strstr(argv[i], "-beamsize") && (argc > (i+1)))
{
beamsize = atof(argv[i+1])/1000;
}
if((strstr(argv[i], "-mosaic") && (strlen(argv[i]) == 7) || strstr(argv[i], "-mosaici") || strstr(argv[i], "-mosaic_spr")) && (argc > (i+1)))
{
mosaic_spread = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-mosaic_dom") && (argc > (i+1)))
{
mosaic_domains = atoi(argv[i+1]);
}
if(strstr(argv[i], "-dispersion") && (argc > (i+1)))
{
dispersion = atof(argv[i+1])/100.0;
}
if(strstr(argv[i], "-dispsteps") && (argc > (i+1)))
{
dispsteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-divergence") && (argc > (i+1)))
{
hdivrange = vdivrange = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-hdivrange") && (argc > (i+1)))
{
hdivrange = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-vdivrange") && (argc > (i+1)))
{
vdivrange = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-hdivstep") && (strlen(argv[i]) == 9) && (argc > (i+1)))
{
hdivstep = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-hdivsteps") && (argc > (i+1)))
{
hdivsteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-vdivstep") && (strlen(argv[i]) == 9) && (argc > (i+1)))
{
vdivstep = atof(argv[i+1])/1000.0;
}
if(strstr(argv[i], "-vdivsteps") && (argc > (i+1)))
{
vdivsteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-divsteps") && (argc > (i+1)))
{
hdivsteps = vdivsteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-round_div") )
{
/* cut to circle */
round_div = 1;
}
if(strstr(argv[i], "-square_div") )
{
/* just raster */
round_div = 0;
}
if(strstr(argv[i], "-adc") && (argc > (i+1)))
{
adc_offset = atof(argv[i+1]);
}
if(strstr(argv[i], "-phi") && strlen(argv[i])==4 && (argc > (i+1)))
{
phi0 = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-osc") && (argc > (i+1)))
{
osc = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-phistep") && strlen(argv[i])==8 && (argc > (i+1)))
{
phistep = atof(argv[i+1])/RTD;
}
if(strstr(argv[i], "-phisteps") && (argc > (i+1)))
{
phisteps = atoi(argv[i+1]);
}
if(strstr(argv[i], "-dmin") && (argc > (i+1)))
{
dmin = atof(argv[i+1])*1e-10;
}
if(strstr(argv[i], "-mat") && (argc > (i+1)))
{
matfilename = argv[i+1];
}
if(strstr(argv[i], "-hkl") && (argc > (i+1)))
{
hklfilename = argv[i+1];
}
if(strstr(argv[i], "-default_F") && (argc > (i+1)))
{
default_F = atof(argv[i+1]);
}
if(strstr(argv[i], "-img") && (argc > (i+1)))
{
imginfilename = argv[i+1];
}
if(strstr(argv[i], "-stolout") && strlen(argv[i])>7 && (argc > (i+1)))
{
stoloutfilename = argv[i+1];
}
if(strstr(argv[i], "-stol") && strlen(argv[i])==5 && (argc > (i+1)))
{
stolfilename = argv[i+1];
stol_file_mult = 1e10;
}
if(strstr(argv[i], "-4stol") && strlen(argv[i])==6 && (argc > (i+1)))
{
stolfilename = argv[i+1];
stol_file_mult = 1e10/4;
}
if(strstr(argv[i], "-Q") && strlen(argv[i])==2 && (argc > (i+1)))
{
stolfilename = argv[i+1];
stol_file_mult = 1e10/M_PI/4;
}
if(strstr(argv[i], "-sourcefile") && (argc > (i+1)))
{
sourcefilename = argv[i+1];
}
if((strstr(argv[i], "-floatfile") || strstr(argv[i], "-floatimage")) && (argc > (i+1)))
{
floatfilename = argv[i+1];
}
if((strstr(argv[i], "-intfile") || strstr(argv[i], "-intimage")) && (argc > (i+1)))
{
intfilename = argv[i+1];
}
if((strstr(argv[i], "-pgmfile") || strstr(argv[i], "-pgmimage")) && (argc > (i+1)))
{
pgmfilename = argv[i+1];
write_pgm = 1;
}
if((strstr(argv[i], "-noisefile") || strstr(argv[i], "-noiseimage")) && (argc > (i+1)))
{
noisefilename = argv[i+1];
calculate_noise = 1;
}
if(strstr(argv[i], "-nonoise") )
{
/* turn off noise */
calculate_noise = 0;
}
if(strstr(argv[i], "-nopgm") )
{
write_pgm = 0;
}
if(strstr(argv[i], "-scale") && (argc > (i+1)))
{
/* specify the scale for the intfile */
intfile_scale = atof(argv[i+1]);
}
if(strstr(argv[i], "-pgmscale") && (argc > (i+1)))
{
/* specify the scale for the intfile */
pgm_scale = atof(argv[i+1]);
write_pgm = 1;
}
if(strstr(argv[i], "-coherent") )
{
/* turn off incoherent addition */
coherent = 1;
}
if(strstr(argv[i], "-printout") )
{
/* turn on console printing */
printout = 1;
}
if(strstr(argv[i], "-noprogress") )
{
/* turn off progress meter */
progress_meter = 0;
}
if(strstr(argv[i], "-progress") )
{
/* turn on progress meter */
progress_meter = 1;
}
if(strstr(argv[i], "-interpolate") )
{
/* turn on tricubic interpolation */
interpolate = 1;
}
if(strstr(argv[i], "-nointerpolate") )
{
/* turn off tricubic interpolation */
interpolate = 0;
}
if(strstr(argv[i], "-round_xtal") )
{
/* use sinc3 */
xtal_shape = ROUND;
}
if(strstr(argv[i], "-square_xtal") )
{
/* use sincg */
xtal_shape = SQUARE;
}
if(strstr(argv[i], "-gauss_xtal") )
{
/* use Gaussian */
xtal_shape = GAUSS;
}
if(strstr(argv[i], "-binary_spots") || strstr(argv[i], "-tophat_spots"))
{
/* top hat */
xtal_shape = TOPHAT;
}
if(strstr(argv[i], "-fudge") && (argc > (i+1)))
{
fudge = atof(argv[i+1]);
}
if(strstr(argv[i], "-printout_pixel") && (argc > (i+2)))
{
printout_fpixel = atoi(argv[i+1]);
printout_spixel = atoi(argv[i+2]);
}
if(strstr(argv[i], "-seed") && (argc > (i+1)))
{
seed = -atoi(argv[i+1]);
}
if(strstr(argv[i], "-mosaic_seed") && (argc > (i+1)))
{
mosaic_seed = -atoi(argv[i+1]);
}
if(strstr(argv[i], "-water") && (argc > (i+1)))
{
water_size = atof(argv[i+1])/1e6;
}
}
}
/* fill in blanks */
if(fpixels) {
detsize_f = pixel_size*fpixels;
}
if(spixels) {
detsize_s = pixel_size*spixels;
}
fpixels = ceil(detsize_f/pixel_size-0.5);
spixels = ceil(detsize_s/pixel_size-0.5);
pixels = fpixels*spixels;
/* get fluence from flux */
if(flux != 0.0 && exposure > 0.0 && beamsize >= 0){
fluence = flux*exposure/beamsize/beamsize;
}
if(beamsize >= 0){
if(beamsize < sample_y){
printf("WARNING: clipping sample (%lg m high) with beam (%lg m)\n",sample_y,beamsize);
sample_y = beamsize;
}
if(beamsize < sample_z){
printf("WARNING: clipping sample (%lg m wide) with beam (%lg m)\n",sample_z,beamsize);
sample_z = beamsize;
}
}
if(exposure > 0.0)
{
/* make sure flux is consistent with everything else */
flux = fluence/exposure*beamsize*beamsize;
}
/* straighten up sample properties */
// volume = sample_x*sample_y*sample_z;
// molecules = volume*density*Avogadro/molecular_weight;
/* defaults? */
if(! isnan(ORGX)) Fclose = (ORGX-0.5)*pixel_size;
if(! isnan(ORGY)) Sclose = (ORGY-0.5)*pixel_size;
/* place beam center halfway between four middle pixels */
/* place beam center at int(npix/2) location */
if(isnan(Fclose)) Fclose = (detsize_f - 0*pixel_size)/2.0;
if(isnan(Sclose)) Sclose = (detsize_s + 0*pixel_size)/2.0;
if(isnan(Xclose)) Xclose = Fclose;
if(isnan(Yclose)) Yclose = Sclose;
if(isnan(Fbeam)) Fbeam = Fclose;
if(isnan(Sbeam)) Sbeam = Sclose;
if(roi_xmin < 0) roi_xmin = 0;
if(roi_xmax < 0) roi_xmax = fpixels;
if(roi_ymin < 0) roi_ymin = 0;
if(roi_ymax < 0) roi_ymax = spixels;
progress_pixels = (roi_xmax-roi_xmin+1)*(roi_ymax-roi_ymin+1);
if(beam_convention == ADXV)
{
/* first pixel is at 0,0 pix and pixel_size,pixel_size*npixels mm */
if(isnan(Xbeam)) Xbeam = (detsize_f + pixel_size)/2.0;
if(isnan(Ybeam)) Ybeam = (detsize_s - pixel_size)/2.0;
beam_vector[1]= 0; beam_vector[2]= 0; beam_vector[3]= 1;
fdet_vector[1]= 1; fdet_vector[2]= 0; fdet_vector[3]= 0;
sdet_vector[1]= 0; sdet_vector[2]= -1; sdet_vector[3]= 0;
odet_vector[1]= 0; odet_vector[2]= 0; odet_vector[3]= 1;
twotheta_axis[1]= -1; twotheta_axis[2]= 0; twotheta_axis[3]= 0;
polar_vector[1]= 1; polar_vector[2]= 0; polar_vector[3]= 0;
spindle_vector[1]= 1; spindle_vector[2]= 0; spindle_vector[3]= 0;
Fbeam = Xbeam;
Sbeam = detsize_s - Ybeam;
detector_pivot = BEAM;
}
if(beam_convention == MOSFLM)
{
/* first pixel is at 0.5,0.5 pix and pixel_size/2,pixel_size/2 mm */
if(isnan(Xbeam)) Xbeam = (detsize_s + pixel_size)/2.0;
if(isnan(Ybeam)) Ybeam = (detsize_f + pixel_size)/2.0;
beam_vector[1]= 1; beam_vector[2]= 0; beam_vector[3]= 0;
odet_vector[1]= 1; odet_vector[2]= 0; odet_vector[3]= 0;
fdet_vector[1]= 0; fdet_vector[2]= 0; fdet_vector[3]= 1;
sdet_vector[1]= 0; sdet_vector[2]= -1; sdet_vector[3]= 0;
twotheta_axis[1]= 0; twotheta_axis[2]= 0; twotheta_axis[3]= -1;
polar_vector[1]= 0; polar_vector[2]= 0; polar_vector[3]= 1;
spindle_vector[1]= 0; spindle_vector[2]= 0; spindle_vector[3]= 1;
Fbeam = Ybeam + 0.5*pixel_size;
Sbeam = Xbeam + 0.5*pixel_size;
detector_pivot = BEAM;
}
if(beam_convention == DENZO)
{
if(isnan(Xbeam)) Xbeam = (detsize_s + pixel_size)/2.0;
if(isnan(Ybeam)) Ybeam = (detsize_f + pixel_size)/2.0;
beam_vector[1]= 1; beam_vector[2]= 0; beam_vector[3]= 0;
odet_vector[1]= 1; odet_vector[2]= 0; odet_vector[3]= 0;
fdet_vector[1]= 0; fdet_vector[2]= 0; fdet_vector[3]= 1;
sdet_vector[1]= 0; sdet_vector[2]= -1; sdet_vector[3]= 0;
twotheta_axis[1]= 0; twotheta_axis[2]= 0; twotheta_axis[3]= -1;
polar_vector[1]= 0; polar_vector[2]= 0; polar_vector[3]= 1;
spindle_vector[1]= 0; spindle_vector[2]= 0; spindle_vector[3]= 1;
Fbeam = Ybeam + 0.0*pixel_size;
Sbeam = Xbeam + 0.0*pixel_size;
detector_pivot = BEAM;
}
if(beam_convention == XDS)
{
if(isnan(Xbeam)) Xbeam = Xclose;
if(isnan(Ybeam)) Ybeam = Yclose;
beam_vector[1]= 0; beam_vector[2]= 0; beam_vector[3]= 1;
fdet_vector[1]= 1; fdet_vector[2]= 0; fdet_vector[3]= 0;
sdet_vector[1]= 0; sdet_vector[2]= 1; sdet_vector[3]= 0;
odet_vector[1]= 0; odet_vector[2]= 0; odet_vector[3]= 1;
twotheta_axis[1]= 1; twotheta_axis[2]= 0; twotheta_axis[3]= 0;
polar_vector[1]= 1; polar_vector[2]= 0; polar_vector[3]= 0;
spindle_vector[1]= 1; spindle_vector[2]= 0; spindle_vector[3]= 0;
Fbeam = Xbeam;
Sbeam = Ybeam;
detector_pivot = SAMPLE;
}
if(beam_convention == DIALS)
{
if(isnan(Xbeam)) Xbeam = Xclose;
if(isnan(Ybeam)) Ybeam = Yclose;
beam_vector[1]= 0; beam_vector[2]= 0; beam_vector[3]= 1;
fdet_vector[1]= 1; fdet_vector[2]= 0; fdet_vector[3]= 0;
sdet_vector[1]= 0; sdet_vector[2]= 1; sdet_vector[3]= 0;
odet_vector[1]= 0; odet_vector[2]= 0; odet_vector[3]= 1;
twotheta_axis[1]= 0; twotheta_axis[2]= 1; twotheta_axis[3]= 0;
polar_vector[1]= 0; polar_vector[2]= 1; polar_vector[3]= 0;
spindle_vector[1]= 0; spindle_vector[2]= 1; spindle_vector[3]= 0;
Fbeam = Xbeam;
Sbeam = Ybeam;
detector_pivot = SAMPLE;
}
if(beam_convention == CUSTOM)
{
if(isnan(Xbeam)) Xbeam = Xclose;
if(isnan(Ybeam)) Ybeam = Yclose;
Fbeam = Xbeam;
Sbeam = Ybeam;
Fclose = Xbeam;
Sclose = Ybeam;
}
/* straighten up vectors */
unitize(beam_vector,beam_vector);
unitize(fdet_vector,fdet_vector);
unitize(sdet_vector,sdet_vector);
if(unitize(odet_vector,odet_vector) != 1.0)
{
printf("WARNING: auto-generating odet_vector\n");
cross_product(fdet_vector,sdet_vector,odet_vector);
unitize(odet_vector,odet_vector);
}
unitize(polar_vector,polar_vector);
unitize(spindle_vector,spindle_vector);
cross_product(beam_vector,polar_vector,vert_vector);
unitize(vert_vector,vert_vector);
printf("nanoBragg nanocrystal diffraction simulator - James Holton and Ken Frankel 5-17-17\n");
if(hklfilename == NULL)
{
/* see if there are Fs from a previous run */
Fdumpfile = fopen(dumpfilename,"r");
if(Fdumpfile == NULL && default_F == 0.0)
{
printf("ERROR: no hkl file and no dump file to read.");
}
}
if(hklfilename == NULL && Fdumpfile == NULL && default_F == 0.0 || matfilename == NULL && a[0] == 0.0){
printf("usage: nanoBragg -mat auto.mat -hkl Fs.hkl\n");
printf("options:\n");\
printf("\t-mat filename.mat\tmosflm-style matrix file containing three reciprocal unit cell vectors\n");
printf("\t-hkl filename.hkl\ttext file containing h, k, l and F for P1 unit cell\n");
printf("\t-distance \tdistance from origin to detector center in mm\n");
printf("\t-detsize \tdetector size in mm. may also use -detsize_f -detsize_s\n");
printf("\t-detpixels \tdetector size in pixels. may also use -detpixels_x -detpixels_y\n");
printf("\t-pixel \tdetector pixel size in mm.\n");
printf("\t-detector_absorb \tdetector sensor material attenuation depth (um) (default: \"inf\" to save time)\n");
printf("\t-detector_thick \tdetector sensor thickness (um)\n");
printf("\t-detector_thicksteps\tnumber of layers of detector sensor material. Default: 1\n");
printf("\t-Xbeam \timage fast coordinate of direct-beam spot (mm). (default: center)\n");
printf("\t-Ybeam \timage slow coordinate of direct-beam spot (mm). (default: center)\n");
printf("\t-mosflm \tuse MOSFLM's direct-beam convention. (default: adxv)\n");
printf("\t-xds \tuse XDS detector origin convention. (default: adxv)\n");
printf("\t-twotheta \trotation of detector about spindle axis (deg). (default: 0)\n");
printf("\t-N \tnumber of unit cells in all directions. may also use -Na -Nb or -Nc\n");
printf("\t-square_xtal \tspecify parallelpiped crystal shape (default)\n");
printf("\t-round_xtal \tspecify ellipsoidal crystal shape (sort of)\n");
printf("\t-tophat_spots \tclip lattice transform at fwhm: no inter-Bragg maxima\n");
printf("\t-oversample \tnumber of sub-pixels per pixel. use this if xtalsize/lambda > distance/pixel\n");
printf("\t-oversample_thick \tre-calculate thickness effect for sub-pixels (not the default)\n");
printf("\t-oversample_polar \tre-calculate polarization effect for sub-pixels (not the default)\n");
printf("\t-oversample_omega \tre-calculate solid-angle effect for sub-pixels (not the default)\n");
printf("\t-lambda \tincident x-ray wavelength in Angstrom. may also use -energy in eV\n");
printf("\t-mosaic \tisotropic mosaic spread in degrees (use 90 for powder)\n");
printf("\t-mosaic_domains \tnumber of randomly-oriented mosaic domains to render\n");
printf("\t-dispersion \tspectral dispersion: delta-lambda/lambda in percent\n");
printf("\t-dispsteps \tnumber of wavelengths in above range\n");
printf("\t-hdivrange \thorizontal angular spread of source points in mrad\n");
printf("\t-vdivrange \tvertical angular spread of source points in mrad\n");
printf("\t-hdivstep \tnumber of source points in the horizontal\n");
printf("\t-vdivstep \tnumber of source points in the vertical\n");
printf("\t-square_div \tfull divergence grid (default: round off corners)\n");
printf("\t-phi \tstarting rotation value about spindle axis in degrees\n");
printf("\t-osc \trotation range about spindle axis in degrees\n");
printf("\t-phisteps \tnumber of rotation steps to render\n");
printf("\t-water \tadd contribution of x microns of water surrounding crystal\n");
printf("\t-floatfile \tname of binary output file (4-byte floats)\n");
printf("\t-intfile \tname of noiseless smv-formatted output file (not on absolute scale by default)\n");
printf("\t-scale \tscale factor to apply to intfile (default: autoscale)\n");
printf("\t-noisefile \tname of photon-scale smv-formatted output file (with Poisson noise)\n");
printf("\t-roi \tonly render part of the image: xmin xmax ymin ymax\n");
printf("\t-printout \tprint pixel values out to the screen\n");
printf("\t-seed \tspecify random-number seed for noisefile\n");
printf("\t-fluence \tincident beam intensity for photon-counting statistics (photons/m^2)\n");
printf("\t-nonoise \tdisable generating the noisefile\n");
printf("\t-noprogress \tturn off the progress meter\n");
printf("\t-nopolar \tturn off the polarization correction\n");
printf("\t-nointerpolate \tdisable inter-Bragg peak structure factor interpolation\n");
printf("\t-interpolate \tforce inter-Bragg peak structure factor interpolation (default: on if < 3 cells wide)\n");
printf("\t-point_pixel \tturn off the pixel solid angle correction\n");
printf("\t-curved_det \tall pixels same distance from crystal\n");
printf("\t-fdet_vector \tunit vector of increasing fast-axis detector pixel coordinate (default: %g %g %g)\n",fdet_vector[1],fdet_vector[2],fdet_vector[3]);
printf("\t-sdet_vector \tunit vector of increasing slow-axis detector pixel coordinate (default: %g %g %g)\n",sdet_vector[1],sdet_vector[2],sdet_vector[3]);
printf("\t-odet_vector \tunit vector of increasing detector distance (default: %g %g %g)\n",odet_vector[1],odet_vector[2],odet_vector[3]);
printf("\t-beam_vector \tunit vector of x-ray beam direction (default: %g %g %g)\n",beam_vector[1],beam_vector[2],beam_vector[3]);
printf("\t-polar_vector \tunit vector of x-ray E-vector polarization (default: %g %g %g)\n",polar_vector[1],polar_vector[2],polar_vector[3]);
printf("\t-spindle_axis \tunit vector of right-handed phi rotation axis (default: %g %g %g)\n",spindle_vector[1],spindle_vector[2],spindle_vector[3]);
printf("\t-pix0_vector \tvector from crystal to first pixel in image (default: beam centered on detector)\n");
// printf("\t-source_distance \tdistance of x-ray source from crystal (default: 10 meters)\n");
exit(9);
}
/* allocate detector memory */
floatimage = (float*) calloc(pixels+10,sizeof(float));
//sinimage = (float*) calloc(pixels+10,2*sizeof(float));
//cosimage = (float*) calloc(pixels+10,2*sizeof(float));
intimage = (unsigned short int*) calloc(pixels+10,sizeof(unsigned short int));
if(write_pgm) pgmimage = (unsigned char*) calloc(pixels+10,sizeof(unsigned char));
/* default sampling logic */
if(phisteps < 0){
/* auto-select number of phi steps */
if(osc < 0.0) {
/* auto-select osc range */
if(phistep <= 0.0) {
/* user doesn't care about anything */
phisteps = 1;
osc = 0.0;
phistep = 0.0;
} else {
/* user doesn't care about osc or steps, but specified step */
osc = phistep;
phisteps = 2;
}
} else {
/* user-specified oscillation */
if(phistep <= 0.0) {
/* osc specified, but nothing else */
phisteps = 2;
phistep = osc/2.0;
} else {
/* osc and phi step specified */
phisteps = ceil(osc/phistep);
}
}
} else {
/* user-specified number of phi steps */
if(phisteps == 0) phisteps = 1;
if(osc < 0.0) {
/* auto-select osc range */
if(phistep <= 0.0) {
/* user cares only about number of steps */
osc = 1.0/RTD;
phistep = osc/phisteps;
} else {
/* user doesn't care about osc, but specified step */
osc = phistep;
phisteps = 2;
}
} else {
/* user-specified oscillation */
if(phistep < 0.0) {
/* osc and steps specified */
phistep = osc/phisteps;
} else {
/* everything specified */
}
}
}
if(hdivsteps <= 0){
/* auto-select number of steps */
if(hdivrange < 0.0) {
/* auto-select range */
if(hdivstep <= 0.0) {
/* user doesn't care about anything */
hdivsteps = 1;
hdivrange = 0.0;
hdivstep = 0.0;
} else {
/* user specified stepsize and nothing else */
hdivrange = hdivstep;
hdivsteps = 2;
}
} else {
/* user-specified range */
if(hdivstep <= 0.0) {
/* range specified, but nothing else */
hdivstep = hdivrange;
hdivsteps = 2;
} else {
/* range and step specified, but not number of steps */
hdivsteps = ceil(hdivrange/hdivstep);
}
}
} else {
/* user-specified number of steps */
if(hdivrange < 0.0) {
/* auto-select range */
if(hdivstep <= 0.0) {
/* user cares only about number of steps */
hdivrange = 1.0;
hdivstep = hdivrange/hdivsteps;
} else {
/* user doesn't care about range */
hdivrange = hdivstep;
hdivsteps = 2;
}
} else {
/* user-specified range */
if(hdivstep <= 0.0) {
/* range and steps specified */
if(hdivsteps <=1 ) hdivsteps = 2;
hdivstep = hdivrange/(hdivsteps-1);
} else {
/* everything specified */
}
}
}
if(vdivsteps <= 0){
/* auto-select number of steps */
if(vdivrange < 0.0) {
/* auto-select range */
if(vdivstep <= 0.0) {
/* user doesn't care about anything */
vdivsteps = 1;
vdivrange = 0.0;
vdivstep = 0.0;
} else {
/* user specified stepsize and nothing else */
vdivrange = vdivstep;
vdivsteps = 2;
}
} else {
/* user-specified range */
if(vdivstep <= 0.0) {
/* range specified, but nothing else */
vdivstep = vdivrange;
vdivsteps = 2;
} else {
/* range and step specified, but not number of steps */
vdivsteps = ceil(vdivrange/vdivstep);
}
}
} else {
/* user-specified number of steps */
if(vdivrange < 0.0) {
/* auto-select range */
if(vdivstep <= 0.0) {
/* user cares only about number of steps */
vdivrange = 1.0;
vdivstep = vdivrange/vdivsteps;
} else {
/* user doesn't care about range */
vdivrange = vdivstep;
vdivsteps = 2;
}
} else {
/* user-specified range */
if(vdivstep <= 0.0) {
/* range and steps specified */
if(vdivsteps <=1 ) vdivsteps = 2;
vdivstep = vdivrange/(vdivsteps-1);
} else {
/* everything specified */
}
}
}
if(dispsteps <= 0){
/* auto-select number of steps */
if(dispersion < 0.0) {
/* auto-select range */
if(dispstep <= 0.0) {
/* user doesn't care about anything */
dispsteps = 1;
dispersion = 0.0;
dispstep = 0.0;
} else {
/* user specified stepsize and nothing else */
dispersion = dispstep;
dispsteps = 2;
}
} else {
/* user-specified range */
if(dispstep <= 0.0) {
/* range specified, but nothing else */
dispstep = dispersion;
dispsteps = 2;
} else {
/* range and step specified, but not number of steps */
dispsteps = ceil(dispersion/dispstep);
}
}
} else {
/* user-specified number of steps */
if(dispersion < 0.0) {
/* auto-select range */
if(dispstep <= 0.0) {
/* user cares only about number of steps */
dispersion = 1.0;
dispstep = dispersion/dispsteps;
} else {
/* user doesn't care about range */
dispersion = dispstep;
dispsteps = 2;
}
} else {
/* user-specified range */
if(dispstep <= 0.0) {
/* range and steps specified */
if(dispsteps <=1 ) dispsteps = 2;
dispstep = dispersion/(dispsteps-1);
} else {
/* everything specified */
}
}
}
if(detector_thicksteps <= 0){
/* auto-select number of steps */
if(detector_thick < 0.0) {
/* auto-select range */
if(detector_thickstep <= 0.0) {
/* user doesn't care about anything */
detector_thicksteps = 1;
detector_thick = 0.0;
detector_thickstep = 0.0;
} else {
/* user specified stepsize and nothing else */
detector_thick = detector_thickstep;
detector_thicksteps = 2;
}
} else {
/* user-specified range */
if(detector_thickstep <= 0.0) {
/* range specified, but nothing else */
detector_thicksteps = 2;
detector_thickstep = detector_thick/detector_thicksteps;
} else {
/* range and step specified, but not number of steps */
detector_thicksteps = ceil(detector_thick/detector_thickstep);
}
}
} else {
/* user-specified number of steps */
if(detector_thick < 0.0) {
/* auto-select range */
if(detector_thickstep <= 0.0) {
/* user cares only about number of steps */
detector_thick = 0.5e-6;
detector_thickstep = detector_thick/detector_thicksteps;
} else {
/* user doesn't care about range */
detector_thick = detector_thickstep;
detector_thicksteps = 2;
}
} else {
/* user-specified range */
if(detector_thickstep <= 0.0) {
/* range and steps specified */
if(detector_thicksteps <=1 ) detector_thicksteps = 2;
detector_thickstep = detector_thick/(detector_thicksteps-1);
} else {
/* everything specified */
}
}
}
if(mosaic_domains <= 0){
/* auto-select number of domains */
if(mosaic_spread < 0.0) {
/* user doesn't care about anything */
mosaic_domains = 1;
mosaic_spread = 0.0;
} else {
/* user-specified mosaicity, but not number of domains */
if(mosaic_spread == 0.0)
{
mosaic_domains = 1;
}
else
{
printf("WARNING: finite mosaicity with only one domain! upping to 10 mosaic domains\n");
mosaic_domains = 10;
}
}
} else {
/* user-specified number of domains */
if(mosaic_spread < 0.0) {
/* number of domains specified, but no spread? */
printf("WARNING: no mosaic spread specified. setting mosaic_domains = 1\n");
mosaic_spread = 0.0;
mosaic_domains = 1;
} else {
/* user-specified mosaicity and number of domains */
if(mosaic_spread == 0.0)
{
printf("WARNING: zero mosaic spread specified. setting mosaic_domains = 1\n");
mosaic_domains = 1;
}
}
}
/* sanity checks */
if(hdivrange <= 0.0 || hdivstep <= 0.0 || hdivsteps <= 0) {
hdivsteps = 1;
hdivrange = 0.0;
hdivstep = 0.0;
}
if(vdivrange <= 0.0 || vdivstep <= 0.0 || vdivsteps <= 0) {
vdivsteps = 1;
vdivrange = 0.0;
vdivstep = 0.0;
}
if(dispersion <= 0.0 || dispstep <= 0.0 || dispsteps <= 0) {
dispsteps = 1;
dispersion = 0.0;
dispstep = 0.0;
}
if(detector_thick <= 0.0 || detector_thickstep <= 0.0 || detector_thicksteps <= 0) {
detector_thicksteps = 1;
detector_thick = 0.0;
detector_thickstep = 0.0;
}
/* initialize detector origin from a beam center and distance */
/* there are two conventions here: mosflm and XDS */
if(beam_convention == ADXV) printf("adxv");
if(beam_convention == MOSFLM) printf("mosflm");
if(beam_convention == XDS) printf("xds");
if(beam_convention == DIALS) printf("dials");
if(beam_convention == DENZO) printf("denzo");
if(beam_convention == CUSTOM) printf("custom");
printf(" convention selected.\n");
/* first off, what is the relationship between the two "beam centers"? */
rotate(odet_vector,vector,detector_rotx,detector_roty,detector_rotz);
ratio = dot_product(beam_vector,vector);
if(ratio == 0.0) { ratio = DBL_MIN; }
if(isnan(close_distance)) close_distance = fabs(ratio*distance);
distance = close_distance/ratio;
if(detector_pivot == SAMPLE){
printf("pivoting detector around sample\n");
/* initialize detector origin before rotating detector */
pix0_vector[1] = -Fclose*fdet_vector[1]-Sclose*sdet_vector[1]+close_distance*odet_vector[1];
pix0_vector[2] = -Fclose*fdet_vector[2]-Sclose*sdet_vector[2]+close_distance*odet_vector[2];
pix0_vector[3] = -Fclose*fdet_vector[3]-Sclose*sdet_vector[3]+close_distance*odet_vector[3];
/* now swing the detector origin around */
rotate(pix0_vector,pix0_vector,detector_rotx,detector_roty,detector_rotz);
rotate_axis(pix0_vector,pix0_vector,twotheta_axis,detector_twotheta);
}
/* now orient the detector plane */
rotate(fdet_vector,fdet_vector,detector_rotx,detector_roty,detector_rotz);
rotate(sdet_vector,sdet_vector,detector_rotx,detector_roty,detector_rotz);
rotate(odet_vector,odet_vector,detector_rotx,detector_roty,detector_rotz);
/* also apply orientation part of twotheta swing */
rotate_axis(fdet_vector,fdet_vector,twotheta_axis,detector_twotheta);
rotate_axis(sdet_vector,sdet_vector,twotheta_axis,detector_twotheta);
rotate_axis(odet_vector,odet_vector,twotheta_axis,detector_twotheta);
/* make sure beam center is preserved */
if(detector_pivot == BEAM){
printf("pivoting detector around direct beam spot\n");
pix0_vector[1] = -Fbeam*fdet_vector[1]-Sbeam*sdet_vector[1]+distance*beam_vector[1];
pix0_vector[2] = -Fbeam*fdet_vector[2]-Sbeam*sdet_vector[2]+distance*beam_vector[2];
pix0_vector[3] = -Fbeam*fdet_vector[3]-Sbeam*sdet_vector[3]+distance*beam_vector[3];
}
/* what is the point of closest approach between sample and detector? */
Fclose = -dot_product(pix0_vector,fdet_vector);
Sclose = -dot_product(pix0_vector,sdet_vector);
close_distance = dot_product(pix0_vector,odet_vector);
/* where is the direct beam now? */
/* difference between beam impact vector and detector origin */
newvector[1] = close_distance/ratio*beam_vector[1]-pix0_vector[1];
newvector[2] = close_distance/ratio*beam_vector[2]-pix0_vector[2];
newvector[3] = close_distance/ratio*beam_vector[3]-pix0_vector[3];
/* extract components along detector vectors */
Fbeam = dot_product(fdet_vector,newvector);
Sbeam = dot_product(sdet_vector,newvector);
distance = close_distance/ratio;
/* find origin in XDS convention */
ORGX=Fclose/pixel_size+0.5;
ORGY=Sclose/pixel_size+0.5;
/* find origin in DIALS convention */
newvector[1]=+0;newvector[2]=+0;newvector[3]=+1;
dials_origin[1] = 1000.0*dot_product(pix0_vector,newvector);
newvector[1]=+0;newvector[2]=+1;newvector[3]=+0;
dials_origin[2] = 1000.0*dot_product(pix0_vector,newvector);
newvector[1]=-1;newvector[2]=+0;newvector[3]=+0;
dials_origin[3] = 1000.0*dot_product(pix0_vector,newvector);
/* find the beam in the detector frame */
newvector[1] = dot_product(beam_vector,fdet_vector);
newvector[2] = dot_product(beam_vector,sdet_vector);
newvector[3] = dot_product(beam_vector,odet_vector);
printf("XDS incident beam: %g %g %g\n",newvector[1],newvector[2],newvector[3]);
if(interpolate > 1){
/* no user options */
if(( Na <= 2) || (Nb <= 2) || (Nc <= 2)){
printf("auto-selected tricubic interpolation of structure factors\n");
interpolate = 1;
}
else
{
printf("auto-selected no interpolation\n");
interpolate = 0;
}
}
/* user-specified unit cell */
if(user_cell)
{
/* a few random defaults */
if(b[0] <= 0.0) b[0] = a[0];
if(c[0] <= 0.0) c[0] = a[0];
if(alpha <= 0.0) alpha = M_PI/2;
if(beta <= 0.0) beta = M_PI/2;
if(gamma <= 0.0) gamma = M_PI/2;
/* get cell volume from angles */
aavg = (alpha+beta+gamma)/2;
skew = sin(aavg)*sin(aavg-alpha)*sin(aavg-beta)*sin(aavg-gamma);
if(skew<0.0) skew=-skew;
V_cell = 2.0*a[0]*b[0]*c[0]*sqrt(skew);
if(V_cell <= 0.0)
{
printf("WARNING: impossible unit cell volume: %g\n",V_cell);
V_cell = DBL_MIN;
}
V_star = 1.0/V_cell;
/* now get reciprocal-cell lengths from the angles and volume */
a_star[0] = b[0]*c[0]*sin(alpha)*V_star;
b_star[0] = c[0]*a[0]*sin(beta)*V_star;
c_star[0] = a[0]*b[0]*sin(gamma)*V_star;
if(a_star[0] <= 0.0 || b_star[0] <= 0.0 || c_star[0] <= 0.0)
{
printf("WARNING: impossible reciprocal cell lengths: %g %g %g\n",
a_star[0],b_star[0],c_star[0]);
a_star[0] = fabs(a_star[0]);
b_star[0] = fabs(b_star[0]);
c_star[0] = fabs(c_star[0]);
if(a_star[0] <= 0.0) a_star[0] = DBL_MIN;
if(b_star[0] <= 0.0) b_star[0] = DBL_MIN;
if(c_star[0] <= 0.0) c_star[0] = DBL_MIN;
}
/* for fun, compute the reciprocal-cell angles from direct-cell angles */
sin_alpha_star = a[0]*V_star/b_star[0]/c_star[0];
sin_beta_star = b[0]*V_star/a_star[0]/c_star[0];
sin_gamma_star = c[0]*V_star/a_star[0]/b_star[0];
cos_alpha_star = (cos(beta)*cos(gamma)-cos(alpha))/(sin(beta)*sin(gamma));
cos_beta_star = (cos(gamma)*cos(alpha)-cos(beta))/(sin(gamma)*sin(alpha));
cos_gamma_star = (cos(alpha)*cos(beta)-cos(gamma))/(sin(alpha)*sin(beta));
if(sin_alpha_star>1.0000001 || sin_alpha_star<-1.0000001 ||
sin_beta_star >1.0000001 || sin_beta_star <-1.0000001 ||
sin_gamma_star>1.0000001 || sin_gamma_star<-1.0000001 ||
cos_alpha_star>1.0000001 || cos_alpha_star<-1.0000001 ||
cos_beta_star >1.0000001 || cos_beta_star <-1.0000001 ||
cos_gamma_star>1.0000001 || cos_gamma_star<-1.0000001 )
{
printf("WARNING: oddball reciprocal cell angles:\n");
printf("sin(alpha_star) = %.25g\n",sin_alpha_star);
printf("cos(alpha_star) = %.25g\n",cos_alpha_star);
printf("sin(beta_star) = %.25g\n",sin_beta_star);
printf("cos(beta_star) = %.25g\n",cos_beta_star);
printf("sin(gamma_star) = %.25g\n",sin_gamma_star);
printf("cos9gamma_star) = %.25g\n",cos_gamma_star);
}
if(sin_alpha_star>1.0) sin_alpha_star=1.0;
if(sin_beta_star >1.0) sin_beta_star =1.0;
if(sin_gamma_star>1.0) sin_gamma_star=1.0;
if(sin_alpha_star<-1.0) sin_alpha_star=-1.0;
if(sin_beta_star <-1.0) sin_beta_star =-1.0;
if(sin_gamma_star<-1.0) sin_gamma_star=-1.0;
if(cos_alpha_star*cos_alpha_star>1.0) cos_alpha_star=1.0;
if(cos_beta_star *cos_beta_star >1.0) cos_beta_star=1.0;
if(cos_gamma_star*cos_gamma_star>1.0) cos_gamma_star=1.0;
alpha_star = atan2(sin_alpha_star,cos_alpha_star);
beta_star = atan2(sin_beta_star ,cos_beta_star );
gamma_star = atan2(sin_gamma_star,cos_gamma_star);
/* construct default orientation */
a_star[1] = a_star[0];
b_star[1] = b_star[0]*cos_gamma_star;
c_star[1] = c_star[0]*cos_beta_star;
a_star[2] = 0.0;
b_star[2] = b_star[0]*sin_gamma_star;
c_star[2] = c_star[0]*(cos_alpha_star-cos_beta_star*cos_gamma_star)/sin_gamma_star;
a_star[3] = 0.0;
b_star[3] = 0.0;
c_star[3] = c_star[0]*V_cell/(a[0]*b[0]*c[0]*sin_gamma_star);
}
/* load the lattice orientation (reciprocal cell vectors) from a mosflm matrix */
if(matfilename != NULL)
{
infile = fopen(matfilename,"r");
if(infile != NULL)
{
printf("reading %s\n",matfilename);
if(! fscanf(infile,"%lg%lg%lg",a_star+1,b_star+1,c_star+1)) {perror("fscanf");};
if(! fscanf(infile,"%lg%lg%lg",a_star+2,b_star+2,c_star+2)) {perror("fscanf");};
if(! fscanf(infile,"%lg%lg%lg",a_star+3,b_star+3,c_star+3)) {perror("fscanf");};
fclose(infile);
/* mosflm A matrix includes the wavelength, so remove it */
/* calculate reciprocal cell lengths, store in 0th element */
vector_scale(a_star,a_star,1e-10/lambda0);
vector_scale(b_star,b_star,1e-10/lambda0);
vector_scale(c_star,c_star,1e-10/lambda0);
}
}
/* check for flag to generate random missetting angle */
if(misset[0] == -1.0)
{
/* use spherical cap as sphere to generate random orientation in umat */
mosaic_rotation_umat(90.0, umat, &seed);
/* get the missetting angles, in case we want to use them again on -misset option */
umat2misset(umat,misset);
printf("random orientation misset angles: %f %f %f deg\n",misset[1]*RTD,misset[2]*RTD,misset[3]*RTD);
/* apply this orientation shift */
//rotate_umat(a_star,a_star,umat);
//rotate_umat(b_star,b_star,umat);
//rotate_umat(c_star,c_star,umat);
/* do not apply again */
misset[0] = 1.0;
}
/* apply any missetting angle, if not already done */
if(misset[0] > 0.0)
{
rotate(a_star,a_star,misset[1],misset[2],misset[3]);
rotate(b_star,b_star,misset[1],misset[2],misset[3]);
rotate(c_star,c_star,misset[1],misset[2],misset[3]);
}
/* various cross products */
cross_product(a_star,b_star,a_star_cross_b_star);
cross_product(b_star,c_star,b_star_cross_c_star);
cross_product(c_star,a_star,c_star_cross_a_star);
/* reciprocal lattice vector "a_star" is defined as perpendicular to both b and c, and must also preserve volume
converse is true for direct-space lattice: a is perpendicular to both b_star and c_star
a = ( b_star cross c_star ) / V_star */
/* reciprocal unit cell volume, but is it lambda-corrected? */
V_star = dot_product(a_star,b_star_cross_c_star);
/* make sure any user-supplied cell takes */
if(user_cell)
{
/* a,b,c and V_cell were generated above */
/* force the cross-product vectors to have proper magnitude: b_star X c_star = a*V_star */
vector_rescale(b_star_cross_c_star,b_star_cross_c_star,a[0]/V_cell);
vector_rescale(c_star_cross_a_star,c_star_cross_a_star,b[0]/V_cell);
vector_rescale(a_star_cross_b_star,a_star_cross_b_star,c[0]/V_cell);
V_star = 1.0/V_cell;
}
/* direct-space cell volume */
V_cell = 1.0/V_star;
/* generate direct-space cell vectors, also updates magnitudes */
vector_scale(b_star_cross_c_star,a,V_cell);
vector_scale(c_star_cross_a_star,b,V_cell);
vector_scale(a_star_cross_b_star,c,V_cell);
/* now that we have direct-space vectors, re-generate the reciprocal ones */
cross_product(a,b,a_cross_b);
cross_product(b,c,b_cross_c);
cross_product(c,a,c_cross_a);
vector_scale(b_cross_c,a_star,V_star);
vector_scale(c_cross_a,b_star,V_star);
vector_scale(a_cross_b,c_star,V_star);
/* for fun, calculate the cell angles too */
sin_alpha = a_star[0]*V_cell/b[0]/c[0];
sin_beta = b_star[0]*V_cell/a[0]/c[0];
sin_gamma = c_star[0]*V_cell/a[0]/b[0];
cos_alpha = dot_product(b,c)/b[0]/c[0];
cos_beta = dot_product(a,c)/a[0]/c[0];
cos_gamma = dot_product(a,b)/a[0]/b[0];
if(sin_alpha>1.0000001 || sin_alpha<-1.0000001 ||
sin_beta >1.0000001 || sin_beta <-1.0000001 ||
sin_gamma>1.0000001 || sin_gamma<-1.0000001 ||
cos_alpha>1.0000001 || cos_alpha<-1.0000001 ||
cos_beta >1.0000001 || cos_beta <-1.0000001 ||
cos_gamma>1.0000001 || cos_gamma<-1.0000001 )
{
printf("WARNING: oddball cell angles:\n");
printf("sin_alpha = %.25g\n",sin_alpha);
printf("cos_alpha = %.25g\n",cos_alpha);
printf("sin_beta = %.25g\n",sin_beta);
printf("cos_beta = %.25g\n",cos_beta);
printf("sin_gamma = %.25g\n",sin_gamma);
printf("cos_gamma = %.25g\n",cos_gamma);
}
if(sin_alpha>1.0) sin_alpha=1.0;
if(sin_beta >1.0) sin_beta =1.0;
if(sin_gamma>1.0) sin_gamma=1.0;
if(sin_alpha<-1.0) sin_alpha=-1.0;
if(sin_beta <-1.0) sin_beta =-1.0;
if(sin_gamma<-1.0) sin_gamma=-1.0;
if(cos_alpha*cos_alpha>1.0) cos_alpha=1.0;
if(cos_beta *cos_beta >1.0) cos_beta=1.0;
if(cos_gamma*cos_gamma>1.0) cos_gamma=1.0;
alpha = atan2(sin_alpha,cos_alpha);
beta = atan2(sin_beta ,cos_beta );
gamma = atan2(sin_gamma,cos_gamma);
/* reciprocal cell angles */
sin_alpha_star = a[0]*V_star/b_star[0]/c_star[0];
sin_beta_star = b[0]*V_star/a_star[0]/c_star[0];
sin_gamma_star = c[0]*V_star/a_star[0]/b_star[0];
cos_alpha_star = dot_product(b_star,c_star)/b_star[0]/c_star[0];
cos_beta_star = dot_product(a_star,c_star)/a_star[0]/c_star[0];
cos_gamma_star = dot_product(a_star,b_star)/a_star[0]/b_star[0];
if(sin_alpha_star>1.0000001 || sin_alpha_star<-1.0000001 ||
sin_beta_star >1.0000001 || sin_beta_star <-1.0000001 ||
sin_gamma_star>1.0000001 || sin_gamma_star<-1.0000001 ||
cos_alpha_star>1.0000001 || cos_alpha_star<-1.0000001 ||
cos_beta_star >1.0000001 || cos_beta_star <-1.0000001 ||
cos_gamma_star>1.0000001 || cos_gamma_star<-1.0000001 )
{
printf("WARNING: oddball reciprocal cell angles:\n");
printf("sin(alpha_star) = %.25g\n",sin_alpha_star);
printf("cos(alpha_star) = %.25g\n",cos_alpha_star);
printf("sin(beta_star) = %.25g\n",sin_beta_star);
printf("cos(beta_star) = %.25g\n",cos_beta_star);
printf("sin(gamma_star) = %.25g\n",sin_gamma_star);
printf("cos(gamma_star) = %.25g\n",cos_gamma_star);
}
if(sin_alpha_star>1.0) sin_alpha_star=1.0;
if(sin_beta_star >1.0) sin_beta_star =1.0;
if(sin_gamma_star>1.0) sin_gamma_star=1.0;
if(sin_alpha_star<-1.0) sin_alpha_star=-1.0;
if(sin_beta_star <-1.0) sin_beta_star =-1.0;
if(sin_gamma_star<-1.0) sin_gamma_star=-1.0;
if(cos_alpha_star*cos_alpha_star>1.0) cos_alpha_star=1.0;
if(cos_beta_star *cos_beta_star >1.0) cos_beta_star=1.0;
if(cos_gamma_star*cos_gamma_star>1.0) cos_gamma_star=1.0;
alpha_star = atan2(sin_alpha_star,cos_alpha_star);
beta_star = atan2(sin_beta_star ,cos_beta_star );
gamma_star = atan2(sin_gamma_star,cos_gamma_star);
printf("Unit Cell: %g %g %g %g %g %g\n", a[0],b[0],c[0],alpha*RTD,beta*RTD,gamma*RTD);
printf("Recp Cell: %g %g %g %g %g %g\n", a_star[0],b_star[0],c_star[0],alpha_star*RTD,beta_star*RTD,gamma_star*RTD);
printf("volume = %g A^3\n",V_cell);
/* print out the real-space matrix */
printf("real-space cell vectors (Angstrom):\n");
printf(" %-10s %-10s %-10s\n","a","b","c");
printf("X: %11.8f %11.8f %11.8f\n",a[1],b[1],c[1]);
printf("Y: %11.8f %11.8f %11.8f\n",a[2],b[2],c[2]);
printf("Z: %11.8f %11.8f %11.8f\n",a[3],b[3],c[3]);
printf("reciprocal-space cell vectors (Angstrom^-1):\n");
printf(" %-10s %-10s %-10s\n","a_star","b_star","c_star");
printf("X: %11.8f %11.8f %11.8f\n",a_star[1],b_star[1],c_star[1]);
printf("Y: %11.8f %11.8f %11.8f\n",a_star[2],b_star[2],c_star[2]);
printf("Z: %11.8f %11.8f %11.8f\n",a_star[3],b_star[3],c_star[3]);
/* now convert these to meters */
vector_scale(a,a,1e-10);
vector_scale(b,b,1e-10);
vector_scale(c,c,1e-10);
/* define phi=0 mosaic=0 crystal orientation */
vector_scale(a,a0,1.0);
vector_scale(b,b0,1.0);
vector_scale(c,c0,1.0);
/* define phi=0 crystal orientation */
vector_scale(a,ap,1.0);
vector_scale(b,bp,1.0);
vector_scale(c,cp,1.0);
/* now we know the cell, calculate crystal size in meters */
if(sample_x > 0) Na = ceil(sample_x/a[0]);
if(sample_y > 0) Nb = ceil(sample_y/b[0]);
if(sample_z > 0) Nc = ceil(sample_z/c[0]);
if(Na <= 1.0) Na = 1.0;
if(Nb <= 1.0) Nb = 1.0;
if(Nc <= 1.0) Nc = 1.0;
xtalsize_a = a[0]*Na;
xtalsize_b = b[0]*Nb;
xtalsize_c = c[0]*Nc;
printf("crystal is %g x %g x %g microns\n",xtalsize_a*1e6,xtalsize_b*1e6,xtalsize_c*1e6);
xtalsize_max = xtalsize_a;
if(xtalsize_max < xtalsize_b) xtalsize_max = xtalsize_b;
if(xtalsize_max < xtalsize_c) xtalsize_max = xtalsize_c;
reciprocal_pixel_size = lambda0*distance/pixel_size;
recommended_oversample = ceil(3.0 * xtalsize_max/reciprocal_pixel_size);
if(recommended_oversample <= 0) recommended_oversample = 1;
if(oversample <= 0) {
oversample = recommended_oversample;
printf("auto-selected %d-fold oversampling\n",oversample);
}
if(oversample < recommended_oversample)
{
printf("WARNING: maximum dimension of sample is %g A\n",xtalsize_max*1e10);
printf(" but reciprocal pixel size is %g A\n", reciprocal_pixel_size*1e10 );
printf(" intensity may vary significantly across a pixel!\n");
printf(" recommend -oversample %d to work around this\n",recommended_oversample);
}
/* rough estimate of sample properties */
sample_x = xtalsize_a;
sample_y = xtalsize_b;
sample_z = xtalsize_c;
volume = sample_x*sample_y*sample_z;
density = 1.2e6;
molecules = Na*Nb*Nc;
molecular_weight = volume*density*Avogadro/molecules;
printf("approximate MW = %g\n",molecular_weight);
/* load the structure factors */
if(hklfilename == NULL)
{
/* try to recover Fs from a previous run */
if(Fdumpfile != NULL)
{
printf("reading Fs from %s\n",dumpfilename);
// n=0;
if(! fscanf(Fdumpfile,"%d%d%d%d%d%d\n\f",&h_min,&h_max,&k_min,&k_max,&l_min,&l_max) ) {perror("fscanf");};
h_range = h_max - h_min + 1;
k_range = k_max - k_min + 1;
l_range = l_max - l_min + 1;
Fhkl = (double***) calloc(h_range+1,sizeof(double**));
for (h0=0; h0<=h_range;h0++) {
*(Fhkl +h0) = (double**) calloc(k_range+1,sizeof(double*));
for (k0=0; k0<=k_range;k0++) {
*(*(Fhkl +h0)+k0) = (double*) calloc(l_range+1,sizeof(double));
if(! fread(*(*(Fhkl +h0)+k0),sizeof(double),l_range+1,Fdumpfile) )
{
perror("fscanf");
};
// n+=l_range;
}
}
fclose(Fdumpfile);
hkls = h_range*k_range*l_range;
}
else
{
/* no hkl file and no dumpfile */
if(default_F == 0.0)
{
printf("ERROR: no hkl file and no dump file to read.");
exit(9);
}
}
}
else
{
infile = fopen(hklfilename,"r");
if(infile == NULL)
{
printf("ERROR: unable to open %s.",hklfilename);
exit(9);
}
hkls = 0;
h_min=k_min=l_min=1e9;
h_max=k_max=l_max=-1e9;
printf("counting entries in %s\n",hklfilename);
while(4 == fscanf(infile,"%lg%lg%lg%lg",&h,&k,&l,&F_cell)){
if(h != ceil(h-0.4)) printf("WARNING: non-integer value for h (%g) at line %d\n",h,hkls);
if(k != ceil(k-0.4)) printf("WARNING: non-integer value for k (%g) at line %d\n",k,hkls);
if(l != ceil(l-0.4)) printf("WARNING: non-integer value for l (%g) at line %d\n",l,hkls);
if(h_min > h) h_min = h;
if(k_min > k) k_min = k;
if(l_min > l) l_min = l;
if(h_max < h) h_max = h;
if(k_max < k) k_max = k;
if(l_max < l) l_max = l;
++hkls;
}
rewind(infile);
h_range = h_max - h_min + 1;
k_range = k_max - k_min + 1;
l_range = l_max - l_min + 1;
if(h_range < 0 || k_range < 0 || l_range < 0) {
printf("h: %d - %d\n",h_min,h_max);
printf("k: %d - %d\n",k_min,k_max);
printf("l: %d - %d\n",l_min,l_max);
printf("ERROR: not enough HKL indices in %s\n",hklfilename);
exit(9);
}
/* allocate memory for 3d arrays */
//printf("allocating %d %d-byte double**\n",h_range+1,sizeof(double**));
Fhkl = (double***) calloc(h_range+1,sizeof(double**));
if(Fhkl==NULL){perror("ERROR");exit(9);};
for (h0=0; h0<=h_range;h0++) {
//printf("allocating %d %d-byte double*\n",k_range+1,sizeof(double*));
Fhkl[h0] = (double**) calloc(k_range+1,sizeof(double*));
if(Fhkl[h0]==NULL){perror("ERROR");exit(9);};
for (k0=0; k0<=k_range;k0++) {
//printf("allocating %d %d-byte double\n",k_range+1,sizeof(double));
Fhkl[h0][k0] = (double*) calloc(l_range+1,sizeof(double));
if(Fhkl[h0][k0]==NULL){perror("ERROR");exit(9);};
}
}
if(default_F != 0.0) {
printf("initializing to default_F = %g:\n",default_F);
for (h0=0; h0<h_range;h0++) {
for (k0=0; k0<k_range;k0++) {
for (l0=0; l0<l_range;l0++) {
Fhkl[h0][k0][l0] = default_F;
}
}
}
printf("done initializing:\n");
}
printf("re-reading %s\n",hklfilename);
while(4 == fscanf(infile,"%d%d%d%lg",&h0,&k0,&l0,&F_cell)){
Fhkl[h0-h_min][k0-k_min][l0-l_min]=F_cell;
}
fclose(infile);
// for(h0=h_min;h0<=h_max;++h0){
// for(k0=k_min;k0<=k_max;++k0){
// for(l0=l_min;l0<=l_max;++l0){
// if ( (h0<=h_max) && (h0>=h_min) && (k0<=k_max) && (k0>=k_min) && (l0<=l_max) && (l0>=l_min) ) {
// /* just take nearest-neighbor */
// F_cell = Fhkl[h0-h_min][k0-k_min][l0-l_min];
// }
// else
// {
// F_cell = 0.0;
// }
// printf("%d %d %d = %f\n",h0,k0,l0,F_cell);
// }
// }
// }
/* make dump file */
outfile = fopen(dumpfilename,"wb");
if(outfile == NULL)
{
printf("WARNING: unable to open dump file: %s\n",dumpfilename);
}
else
{
printf("writing dump file for next time: %s\n",dumpfilename);
fprintf(outfile,"%d %d %d %d %d %d\n\f",h_min,h_max,k_min,k_max,l_min,l_max);
for (h0=0; h0<=h_range;h0++) {
for (k0=0; k0<=k_range;k0++) {
fwrite(*(*(Fhkl +h0)+k0),sizeof(double),l_range+1,outfile);
}
}
fclose(outfile);
}
}
/* no point in interpolating if nothing to interpolate */
if(hkls == 0) interpolate = 0;
if(interpolate){
/* allocate interpolation array */
sub_Fhkl = (double***) calloc(6,sizeof(double**));
for (h0=0; h0<=5;h0++) {
*(sub_Fhkl +h0) = (double**) calloc(6,sizeof(double*));
for (k0=0; k0<=5;k0++) {
*(*(sub_Fhkl +h0)+k0) = (double*) calloc(6,sizeof(double));
}
}
}
/* now read in amorphous material structure factors */
stols = 0;
if(stolfilename != NULL)
{
printf("reading %s\n",stolfilename);
stols = read_text_file(stolfilename,2,&stol_of,&F_of);
if(stols == 0){
perror("no data in input file");
exit(9);
}
}
if(stols == 0 && water_size != 0.0)
{
/* do something clever here */
}
if(stols > 0)
{
/* add two values at either end for interpolation */
stols += 4;
F_highangle = NAN;
for(i=stols-3;i>1;--i){
stol_of[i] = stol_of[i-2] * stol_file_mult;
F_of[i] = F_of[i-2];
if(! isnan(F_of[i])) {
F_lowangle = F_of[i];
if(isnan(F_highangle)) {
F_highangle = F_of[i];
}
}
else
{
/* missing values are zero */
F_of[i] = 0.0;
}
}
stol_of[0] = -1e99;
stol_of[1] = -1e98;
F_of[0] = F_of[1] = F_lowangle;
stol_of[stols-2] = 1e98;
stol_of[stols-1] = 1e99;
F_of[stols-1] = F_of[stols-2] = F_highangle;
}
/* print out detector sensor thickness with sweep over all sensor layers */
for(thick_tic=0;thick_tic<detector_thicksteps;++thick_tic){
printf("thick%d = %g um\n",thick_tic,detector_thickstep*thick_tic*1e6);
}
/* show phi steps with sweep over spindle axis */
for(phi_tic = 0; phi_tic < phisteps; ++phi_tic){
phi = phi0 + phistep*phi_tic;
printf("phi%d = %g\n",phi_tic,phi*RTD);
}
/* import sources from user file */
sources = 0;
if(sourcefilename != NULL) {
sources = read_text_file(sourcefilename,5,&source_X,&source_Y,&source_Z,&source_I,&source_lambda);
if(sources == 0) {
perror("reading source definition file");
exit(9);
}
/* apply defaults to missing values */
for(source=0;source<sources;++source){
if(isnan(source_X[source])) {
source_X[source] = -source_distance*beam_vector[1];
}
if(isnan(source_Y[source])) {
source_Y[source] = -source_distance*beam_vector[2];
}
if(isnan(source_Z[source])) {
source_Z[source] = -source_distance*beam_vector[3];
}
if(isnan(source_I[source])) {
source_I[source] = 1.0;
}
if(isnan(source_lambda[source])) {
source_lambda[source] = lambda0;
}
}
}
if(sources == 0)
{
/* generate generic list of sources */
/* count divsteps sweep over solid angle of beam divergence */
divsteps = 0;
for(hdiv_tic=0;hdiv_tic<hdivsteps;++hdiv_tic){
for(vdiv_tic=0;vdiv_tic<vdivsteps;++vdiv_tic){
hdiv = hdivstep * hdiv_tic - hdivrange/2.0 ;
vdiv = vdivstep * vdiv_tic - vdivrange/2.0 ;
/* force an elliptical divergence */
test = (hdiv*hdiv-hdivstep*hdivstep/4.0*(1-hdivsteps%2))/hdivrange/hdivrange ;
test += (vdiv*vdiv-vdivstep*vdivstep/4.0*(1-vdivsteps%2))/vdivrange/vdivrange ;
if( round_div && test*4.0 > 1.1) continue;
++divsteps;
printf("divergence deviation: %g %g\n",hdiv,vdiv);
}
}
/* print out wavelength steps with sweep over spectral dispersion */
for(disp_tic=0;disp_tic<dispsteps;++disp_tic){
lambda = lambda0 * ( 1.0 + dispstep * disp_tic - dispersion/2.0 ) ;
printf("lambda%d = %.15g\n",disp_tic,lambda);
}
/* allocate enough space */
sources = divsteps*dispsteps;
source_X = (double *) calloc(sources+10,sizeof(double));
source_Y = (double *) calloc(sources+10,sizeof(double));
source_Z = (double *) calloc(sources+10,sizeof(double));
source_I = (double *) calloc(sources+10,sizeof(double));
source_lambda = (double *) calloc(sources+10,sizeof(double));
/* now actually create the source entries */
weight = 1.0/sources;
sources = 0;
for(hdiv_tic=0;hdiv_tic<hdivsteps;++hdiv_tic){
for(vdiv_tic=0;vdiv_tic<vdivsteps;++vdiv_tic){
hdiv = hdivstep * hdiv_tic - hdivrange/2.0 ;
vdiv = vdivstep * vdiv_tic - vdivrange/2.0 ;
/* force an elliptical divergence */
test = (hdiv*hdiv-hdivstep*hdivstep/4.0*(1-hdivsteps%2))/hdivrange/hdivrange ;
test += (vdiv*vdiv-vdivstep*vdivstep/4.0*(1-vdivsteps%2))/vdivrange/vdivrange ;
if( round_div && test*4.0 > 1.1) continue;
/* construct unit vector along "beam" */
vector[1] = -source_distance*beam_vector[1];
vector[2] = -source_distance*beam_vector[2];
vector[3] = -source_distance*beam_vector[3];
/* divergence is in angle space */
/* define "horizontal" as the E-vector of the incident beam */
rotate_axis(vector,newvector,polar_vector,vdiv);
rotate_axis(newvector,vector,vert_vector,hdiv);
/* one source at each position for each wavelength */
for(disp_tic=0;disp_tic<dispsteps;++disp_tic){
lambda = lambda0 * ( 1.0 + dispstep * disp_tic - dispersion/2.0 ) ;
source_X[sources] = vector[1];
source_Y[sources] = vector[2];
source_Z[sources] = vector[3];
source_I[sources] = weight;
source_lambda[sources] = lambda;
++sources;
}
}
}
}
printf(" created a total of %d sources:\n",sources);
for(source=0;source<sources;++source){
/* retrieve stuff from cache */
X = vector[1] = source_X[source];
Y = vector[2] = source_Y[source];
Z = vector[3] = source_Z[source];
I = source_I[source];
lambda = source_lambda[source];
/* make sure these are unit vectors */
unitize(vector,vector);
source_X[source] = vector[1];
source_Y[source] = vector[2];
source_Z[source] = vector[3];
printf("%g %g %g %g %g\n",X,Y,Z,I,lambda);
}
/* allocate enough space */
mosaic_umats = (double *) calloc(mosaic_domains+10,9*sizeof(double));
/* now actually create the orientation of each domain */
for(mos_tic=0;mos_tic<mosaic_domains;++mos_tic){
mosaic_rotation_umat(mosaic_spread, mosaic_umats+9*mos_tic, &mosaic_seed);
if(mos_tic==0)
{
/* force at least one domain to be "aligned"? */
mosaic_umats[0]=1.0;mosaic_umats[1]=0.0;mosaic_umats[2]=0.0;
mosaic_umats[3]=0.0;mosaic_umats[4]=1.0;mosaic_umats[5]=0.0;
mosaic_umats[6]=0.0;mosaic_umats[7]=0.0;mosaic_umats[8]=1.0;
}
// printf("%d diagonal %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+8]);
printf("%d by: %f deg\n",mos_tic,acos((mosaic_umats[mos_tic*9]+mosaic_umats[mos_tic*9+4]+mosaic_umats[mos_tic*9+8]-1)/2)*RTD);
// umat2misset(mosaic_umats+9*mos_tic,mosaic_missets);
// printf("%d by: %f %f %f deg\n",mos_tic,mosaic_missets[1]*RTD,mosaic_missets[2]*RTD,mosaic_missets[3]*RTD);
// printf("%f %f %f\n",mos_tic,*(mosaic_umats+9*mos_tic+0),*(mosaic_umats+9*mos_tic+1),*(mosaic_umats+9*mos_tic+2));
// printf("%f %f %f\n",mos_tic,*(mosaic_umats+9*mos_tic+3),*(mosaic_umats+9*mos_tic+4),*(mosaic_umats+9*mos_tic+5));
// printf("%f %f %f\n",mos_tic,*(mosaic_umats+9*mos_tic+6),*(mosaic_umats+9*mos_tic+7),*(mosaic_umats+9*mos_tic+8));
}
printf(" created a total of %d mosaic domains\n",mosaic_domains);
/* final decisions about sampling */
if(oversample <= 0) oversample = 1;
steps = sources*mosaic_domains*phisteps*oversample*oversample;
subpixel_size = pixel_size/oversample;
printf(" %d initialized hkls (all others =%g)\n",hkls,default_F);
printf(" ");
if(xtal_shape == ROUND) printf("ellipsoidal");
if(xtal_shape == SQUARE) printf("parallelpiped");
if(xtal_shape == GAUSS ) printf("gaussian");
if(xtal_shape == TOPHAT) printf("tophat-spot");
printf(" xtal: %.0fx%.0fx%.0f cells\n",Na,Nb,Nc);
printf(" wave=%g meters +/- %g%% in %d steps\n",lambda0,dispersion*100,dispsteps);
if(nopolar) { printf(" polarization effect disabled\n"); }
else { printf(" Kahn polarization factor: %f\n",polarization); }
if(curved_detector) printf(" curved detector: all pixels same distance from origin\n");
if(point_pixel) printf(" pixel obliquity effect disabled\n");
printf(" incident fluence: %lg photons/m^2\n",fluence);
printf(" distance=%lg detsize=%lgx%lg pixel=%lg meters (%dx%d pixels)\n",distance,detsize_f,detsize_s,pixel_size,fpixels,spixels);
printf(" Xbeam=%lg Ybeam=%lg\n",Xbeam,Ybeam);
printf(" Fbeam=%lg Sbeam=%lg\n",Fbeam,Sbeam);
printf(" Xclose=%lg Yclose=%lg\n",Xclose,Yclose);
printf(" Fclose=%lg Sclose=%lg\n",Fclose,Sclose);
printf(" DIRECTION_OF_DETECTOR_X-AXIS= %g %g %g\n",fdet_vector[1],fdet_vector[2],fdet_vector[3]);
printf(" DIRECTION_OF_DETECTOR_Y-AXIS= %g %g %g\n",sdet_vector[1],sdet_vector[2],sdet_vector[3]);
printf(" DIRECTION_OF_DETECTOR_Z-AXIS= %g %g %g\n",odet_vector[1],odet_vector[2],odet_vector[3]);
printf(" INCIDENT_BEAM_DIRECTION= %g %g %g\n",beam_vector[1],beam_vector[2],beam_vector[3]);
printf(" spindle ROTATION_AXIS= %g %g %g\n",spindle_vector[1],spindle_vector[2],spindle_vector[3]);
cross_product(beam_vector,polar_vector,vector);
printf(" POLARIZATION_PLANE_NORMAL= %g %g %g\n",vector[1],vector[2],vector[3]);
printf(" dials origin= %g %g %g\n",dials_origin[1],dials_origin[2],dials_origin[3]);
printf(" roi: %d < x < %d && %d < y < %d\n",roi_xmin,roi_xmax,roi_ymin,roi_ymax);
printf(" hdivrange=%g hdivstep=%g radians\n",hdivrange,hdivstep);
printf(" vdivrange=%g vdivstep=%g radians\n",vdivrange,vdivstep);
printf(" %d divergence steps\n",divsteps);
printf(" %d sources\n",sources);
printf(" %d mosaic domains over mosaic spread of %g degrees\n",mosaic_domains,mosaic_spread*RTD);
printf(" %d phi steps from %g to %g degrees\n",phisteps,phi0*RTD,(phi0+osc)*RTD);
printf(" %dx%d pixel oversample steps",oversample,oversample);
if(oversample_thick) printf(" +thick");
if(oversample_polar) printf(" +polar");
if(oversample_omega) printf(" +omega");
printf("\n");
if(maskimage != NULL) printf(" skipping zero-flagged pixels in %s\n",maskfilename);
// printf(" coherent source: %d\n",coherent);
if(calculate_noise){
printf("\n noise image paramters:\n");
printf(" seed: %ld\n",seed);
printf(" water droplet size: %g m\n",water_size);
}
/* pre-calculaate background from something amorphous */
F_bg = water_F;
I_bg = F_bg*F_bg*r_e_sqr*fluence*water_size*water_size*water_size*1e6*Avogadro/water_MW;
/* sweep over detector */
sum = sumsqr = 0.0;
sumn = 0;
progress_pixel = 0;
omega_sum = 0.0;
#if defined(_OPENMP)
// omp_set_num_threads(72);
#endif
int debug_printed_thread = 0;
int debug_printed = 0;
#pragma omp parallel for \
schedule(auto) \
private(fpixel,spixel)\
firstprivate(imgidx,subS,subF,Fdet,Sdet,Fdet0,Sdet0,Odet,stol,twotheta,\
theta,vector,newvector,pixel_pos,\
airpath,source_path,lambda,\
diffracted,diffracted0,d_r,incident,scattering,parallax,\
fdet_vector,sdet_vector,odet_vector,beam_vector,pix0_vector,polar_vector,spindle_vector,\
hdiv_tic,vdiv_tic,disp_tic,mos_tic,phi_tic,thick_tic,source,\
phi,\
phi0,osc,phistep,phisteps,\
a,b,c,ap,bp,cp,a_star,b_star,c_star,a_cross_b,b_cross_c,c_cross_a,\
h,k,l,h0,k0,l0,h0_flr,k0_flr,l0_flr,\
h_interp,k_interp,l_interp,h_interp_d,k_interp_d,l_interp_d,hrad_sqr,\
i1,i2,i3,\
Ewald0,Ewald,relp,\
xd,yd,zd,xd0,yd0,zd0,\
capture_fraction,\
I,I_bg,F_bg,\
F_cell,F_latt,polar,omega_pixel,\
test,i,sub_Fhkl,\
Fhkl,\
debug_printed_thread)\
shared(debug_printed,\
floatimage,maskimage,\
fpixels,spixels,pixels,pixel_size,subpixel_size,\
oversample,oversample_thick,oversample_polar,oversample_omega,\
Xbeam,Ybeam,\
interpolate,integral_form,curved_detector,\
polarization,nopolar,\
point_pixel,coherent,babble,\
distance,close_distance,\
source_X,source_Y,source_Z,source_lambda,\
sources,\
progress_meter,progress_pixels,\
a0,b0,c0,V_cell,\
Na,Nb,Nc,\
h_min,h_max,h_range,k_min,k_max,k_range,l_min,l_max,l_range,hkls,\
dmin,\
xtal_shape,fudge,\
fluence,r_e_sqr,\
lambda0,dispersion,dispstep,dispsteps,\
source_distance,\
default_F,water_F,water_size,water_MW,\
steps,\
hdiv,hdivrange,hdivstep,hdivsteps,vdiv,vdivrange,vdivstep,vdivsteps,round_div,\
mosaic_spread,mosaic_umats,mosaic_domains,\
detector_thick,detector_thickstep,detector_thicksteps,detector_mu,\
roi_xmin,roi_xmax,roi_ymin,roi_ymax,\
max_I,max_I_x,max_I_y,\
printout,printout_fpixel,printout_spixel,stdout)\
reduction(+:sum,sumsqr,sumn,omega_sum,progress_pixel)\
default(none)
for(spixel=0;spixel<spixels;++spixel)
{
#if defined(_OPENMP)
//if(! debug_printed) {
// debug_printed = 1;
// printf("OMP: %d of %d threads\n", omp_get_thread_num(),omp_get_num_threads());
//}
if(! debug_printed_thread) {
/* avoid memory contention: make a copy of each dynamically-allocated array for each thread *
double *newptr;
double **newpptr;
double ***newFhkl;
newptr = (double *) calloc((h_range+1)*(k_range+1)*(l_range+1),sizeof(double));
newpptr = (double **) calloc((h_range+1)*(k_range+1),sizeof(double *));
newFhkl = (double ***) calloc((h_range+1),sizeof(double **));
for (h0=0; h0<=h_range;h0++) {
newFhkl[h0] = newpptr;
for (k0=0; k0<=k_range;k0++) {
newFhkl[h0][k0] = newptr;
memcpy(newptr,*(*(Fhkl +h0)+k0),(l_range+1)*sizeof(double));
newptr += l_range+1;
}
++newpptr;
}
Fhkl = newFhkl;
/* */
// newptr = (double *) calloc(sources+10,sizeof(double));
// memcpy(newptr,source_X,sources*sizeof(double));
// source_X = newptr;
// newptr = (double *) calloc(sources+10,sizeof(double));
// memcpy(newptr,source_Y,sources*sizeof(double));
// source_Y = newptr;
// newptr = (double *) calloc(sources+10,sizeof(double));
// memcpy(newptr,source_Z,sources*sizeof(double));
// source_Z = newptr;
// newptr = (double *) calloc(sources+10,sizeof(double));
// memcpy(newptr,source_lambda,sources*sizeof(double));
// source_lambda = newptr;
// newptr = (double *) calloc(mosaic_domains+10,9*sizeof(double));
// memcpy(newptr,mosaic_umats,9*mosaic_domains*sizeof(double));
// printf("thread: %d mosaic_umats = %p\n", omp_get_thread_num(),mosaic_umats);
// mosaic_umats = newptr;
printf("thread: %d mosaic_umats = %p\n", omp_get_thread_num(),mosaic_umats);
debug_printed_thread = 1;
}
#endif
for(fpixel=0;fpixel<fpixels;++fpixel)
{
/* allow for just one part of detector to be rendered */
if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax)
{
continue;
}
/* position in pixel array */
imgidx = spixel*fpixels+fpixel;
/* allow for the use of a mask */
if(maskimage != NULL)
{
/* skip any flagged pixels in the mask */
if(maskimage[imgidx] == 0)
{
continue;
}
}
/* reset uncorrected photon count for this pixel */
I = I_bg;
/* reset polarization factor, in case we want to cache it */
polar = 0.0;
if (nopolar) polar = 1.0;
/* reset pixel solid angle, in case we want to cache it */
omega_pixel = 0.0;
/* add this now to avoid problems with skipping later? */
// floatimage[imgidx] = I_bg;
/* loop over detector layers */
for(thick_tic=0;thick_tic<detector_thicksteps;++thick_tic)
{
/* assume "distance" is to the front of the detector sensor layer */
Odet = thick_tic*detector_thickstep;
/* reset capture fraction, in case we want to cache it */
capture_fraction = 0.0;
/* or if we are not modelling detector thickness */
if(detector_thick == 0.0) capture_fraction = 1.0;
/* loop over sub-pixels */
for(subS=0;subS<oversample;++subS)
{
for(subF=0;subF<oversample;++subF)
{
/* absolute mm position on detector (relative to its origin) */
Fdet = subpixel_size*(fpixel*oversample + subF ) + subpixel_size/2.0;
Sdet = subpixel_size*(spixel*oversample + subS ) + subpixel_size/2.0;
// Fdet = pixel_size*fpixel;
// Sdet = pixel_size*spixel;
/* construct detector subpixel position in 3D space */
// pixel_X = distance;
// pixel_Y = Sdet-Ybeam;
// pixel_Z = Fdet-Xbeam;
pixel_pos[1] = Fdet*fdet_vector[1]+Sdet*sdet_vector[1]+Odet*odet_vector[1]+pix0_vector[1];
pixel_pos[2] = Fdet*fdet_vector[2]+Sdet*sdet_vector[2]+Odet*odet_vector[2]+pix0_vector[2];
pixel_pos[3] = Fdet*fdet_vector[3]+Sdet*sdet_vector[3]+Odet*odet_vector[3]+pix0_vector[3];
pixel_pos[0] = 0.0;
if(curved_detector) {
/* construct detector pixel that is always "distance" from the sample */
vector[1] = distance*beam_vector[1];
vector[2] = distance*beam_vector[2] ;
vector[3] = distance*beam_vector[3];
/* treat detector pixel coordinates as radians */
rotate_axis(vector,newvector,sdet_vector,pixel_pos[2]/distance);
rotate_axis(newvector,pixel_pos,fdet_vector,pixel_pos[3]/distance);
// rotate(vector,pixel_pos,0,pixel_pos[3]/distance,pixel_pos[2]/distance);
}
/* construct the diffracted-beam unit vector to this sub-pixel */
airpath = unitize(pixel_pos,diffracted);
/* solid angle subtended by a pixel: (pix/airpath)^2*cos(2theta) */
if(omega_pixel == 0.0 || oversample_omega)
{
/* this is either the first time for this pixel, or we are oversampling omega */
omega_pixel = pixel_size*pixel_size/airpath/airpath*close_distance/airpath;
/* option to turn off obliquity effect, inverse-square-law only */
if(point_pixel) omega_pixel = 1.0/airpath/airpath;
}
/* keep track for final statistics */
omega_sum += omega_pixel;
/* now calculate detector thickness effects */
if(capture_fraction == 0.0 || oversample_thick)
{
/* inverse of effective thickness increase */
parallax = dot_product(diffracted,odet_vector);
/* fraction of incoming photons absorbed by this detector layer */
capture_fraction = exp(-thick_tic*detector_thickstep*detector_mu/parallax)
-exp(-(thick_tic+1)*detector_thickstep*detector_mu/parallax);
}
/* loop over sources now */
for(source=0;source<sources;++source){
/* retrieve stuff from cache */
incident[1] = -source_X[source];
incident[2] = -source_Y[source];
incident[3] = -source_Z[source];
lambda = source_lambda[source];
/* construct the incident beam unit vector while recovering source distance */
/* source arrays should already be unit vectors */
// source_path = unitize(incident,incident);
/* construct the scattering vector for this pixel */
scattering[1] = (diffracted[1]-incident[1])/lambda;
scattering[2] = (diffracted[2]-incident[2])/lambda;
scattering[3] = (diffracted[3]-incident[3])/lambda;
/* sin(theta)/lambda is half the scattering vector length */
stol = 0.5*magnitude(scattering);
/* rough cut to speed things up when we aren't using whole detector */
if(dmin > 0.0 && stol > 0.0)
{
if(dmin > 0.5/stol)
{
continue;
}
}
/* we now have enough to fix the polarization factor */
if (polar == 0.0 || oversample_polar)
{
/* need to compute polarization factor */
polar = polarization_factor(polarization,incident,diffracted,polar_vector);
}
/* sweep over phi angles */
for(phi_tic = 0; phi_tic < phisteps; ++phi_tic)
{
phi = phi0 + phistep*phi_tic;
if( phi != 0.0 )
{
/* rotate about spindle if neccesary */
rotate_axis(a0,ap,spindle_vector,phi);
rotate_axis(b0,bp,spindle_vector,phi);
rotate_axis(c0,cp,spindle_vector,phi);
}
/* enumerate mosaic domains */
for(mos_tic=0;mos_tic<mosaic_domains;++mos_tic)
{
/* apply mosaic rotation after phi rotation */
if( mosaic_spread > 0.0 )
{
rotate_umat(ap,a,&mosaic_umats[mos_tic*9]);
rotate_umat(bp,b,&mosaic_umats[mos_tic*9]);
rotate_umat(cp,c,&mosaic_umats[mos_tic*9]);
}
else
{
a[1]=ap[1];a[2]=ap[2];a[3]=ap[3];
b[1]=bp[1];b[2]=bp[2];b[3]=bp[3];
c[1]=cp[1];c[2]=cp[2];c[3]=cp[3];
}
// printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+0],mosaic_umats[mos_tic*9+1],mosaic_umats[mos_tic*9+2]);
// printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+3],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+5]);
// printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+6],mosaic_umats[mos_tic*9+7],mosaic_umats[mos_tic*9+8]);
/* construct fractional Miller indicies */
h = dot_product(a,scattering);
k = dot_product(b,scattering);
l = dot_product(c,scattering);
/* round off to nearest whole index */
h0 = ceil(h-0.5);
k0 = ceil(k-0.5);
l0 = ceil(l-0.5);
/* structure factor of the lattice (paralelpiped crystal)
F_latt = sin(M_PI*Na*h)*sin(M_PI*Nb*k)*sin(M_PI*Nc*l)/sin(M_PI*h)/sin(M_PI*k)/sin(M_PI*l);
*/
F_latt = 1.0;
if(xtal_shape == SQUARE)
{
/* xtal is a paralelpiped */
if(Na>1){
F_latt *= sincg(M_PI*h,Na);
}
if(Nb>1){
F_latt *= sincg(M_PI*k,Nb);
}
if(Nc>1){
F_latt *= sincg(M_PI*l,Nc);
}
}
else
{
/* handy radius in reciprocal space, squared */
hrad_sqr = (h-h0)*(h-h0)*Na*Na + (k-k0)*(k-k0)*Nb*Nb + (l-l0)*(l-l0)*Nc*Nc ;
}
if(xtal_shape == ROUND)
{
/* use sinc3 for elliptical xtal shape,
correcting for sqrt of volume ratio between cube and sphere */
F_latt = Na*Nb*Nc*0.723601254558268*sinc3(M_PI*sqrt( hrad_sqr * fudge ) );
}
if(xtal_shape == GAUSS)
{
/* fudge the radius so that volume and FWHM are similar to square_xtal spots */
F_latt = Na*Nb*Nc*exp(-( hrad_sqr / 0.63 * fudge ));
}
if(xtal_shape == TOPHAT)
{
/* make a flat-top spot of same height and volume as square_xtal spots */
F_latt = Na*Nb*Nc*(hrad_sqr*fudge < 0.3969 );
}
/* no need to go further if result will be zero? */
if(F_latt == 0.0 && water_size == 0.0) continue;
/* find nearest point on Ewald sphere surface? */
if( integral_form )
{
if( phi != 0.0 || mos_tic > 0 )
{
/* need to re-calculate reciprocal matrix */
/* various cross products */
cross_product(a,b,a_cross_b);
cross_product(b,c,b_cross_c);
cross_product(c,a,c_cross_a);
/* new reciprocal-space cell vectors */
vector_scale(b_cross_c,a_star,1e20/V_cell);
vector_scale(c_cross_a,b_star,1e20/V_cell);
vector_scale(a_cross_b,c_star,1e20/V_cell);
}
/* reciprocal-space coordinates of nearest relp */
relp[1] = h0*a_star[1] + k0*b_star[1] + l0*c_star[1];
relp[2] = h0*a_star[2] + k0*b_star[2] + l0*c_star[2];
relp[3] = h0*a_star[3] + k0*b_star[3] + l0*c_star[3];
// d_star = magnitude(relp)
/* reciprocal-space coordinates of center of Ewald sphere */
Ewald0[1] = -incident[1]/lambda/1e10;
Ewald0[2] = -incident[2]/lambda/1e10;
Ewald0[3] = -incident[3]/lambda/1e10;
// 1/lambda = magnitude(Ewald0)
/* distance from Ewald sphere in lambda=1 units */
vector[1] = relp[1]-Ewald0[1];
vector[2] = relp[2]-Ewald0[2];
vector[3] = relp[3]-Ewald0[3];
d_r = magnitude(vector)-1.0;
/* unit vector of diffracted ray through relp */
unitize(vector,diffracted0);
/* intersection with detector plane */
xd = dot_product(fdet_vector,diffracted0);
yd = dot_product(sdet_vector,diffracted0);
zd = dot_product(odet_vector,diffracted0);
/* where does the central direct-beam hit */
xd0 = dot_product(fdet_vector,incident);
yd0 = dot_product(sdet_vector,incident);
zd0 = dot_product(odet_vector,incident);
/* convert to mm coordinates */
Fdet0 = distance*(xd/zd) + Xbeam;
Sdet0 = distance*(yd/zd) + Ybeam;
//printf("GOTHERE %g %g %g %g\n",Fdet,Sdet,Fdet0,Sdet0);
test = exp(-( (Fdet-Fdet0)*(Fdet-Fdet0)+(Sdet-Sdet0)*(Sdet-Sdet0) + d_r*d_r )/1e-8);
} // end of integral form
/* structure factor of the unit cell */
if(interpolate){
h0_flr = floor(h);
k0_flr = floor(k);
l0_flr = floor(l);
if ( ((h-h_min+3)>h_range) ||
(h-2<h_min) ||
((k-k_min+3)>k_range) ||
(k-2<k_min) ||
((l-l_min+3)>l_range) ||
(l-2<l_min) ) {
if(babble){
babble=0;
printf ("WARNING: out of range for three point interpolation: h,k,l,h0,k0,l0: %g,%g,%g,%d,%d,%d \n", h,k,l,h0,k0,l0);
printf("WARNING: further warnings will not be printed! ");
}
F_cell = default_F;
interpolate=0;
}
}
/* only interpolate if it is safe */
if(interpolate){
/* integer versions of nearest HKL indicies */
h_interp[0]=h0_flr-1;
h_interp[1]=h0_flr;
h_interp[2]=h0_flr+1;
h_interp[3]=h0_flr+2;
k_interp[0]=k0_flr-1;
k_interp[1]=k0_flr;
k_interp[2]=k0_flr+1;
k_interp[3]=k0_flr+2;
l_interp[0]=l0_flr-1;
l_interp[1]=l0_flr;
l_interp[2]=l0_flr+1;
l_interp[3]=l0_flr+2;
/* polin function needs doubles */
h_interp_d[0] = (double) h_interp[0];
h_interp_d[1] = (double) h_interp[1];
h_interp_d[2] = (double) h_interp[2];
h_interp_d[3] = (double) h_interp[3];
k_interp_d[0] = (double) k_interp[0];
k_interp_d[1] = (double) k_interp[1];
k_interp_d[2] = (double) k_interp[2];
k_interp_d[3] = (double) k_interp[3];
l_interp_d[0] = (double) l_interp[0];
l_interp_d[1] = (double) l_interp[1];
l_interp_d[2] = (double) l_interp[2];
l_interp_d[3] = (double) l_interp[3];
/* now populate the "y" values (nearest four structure factors in each direction) */
for (i1=0;i1<4;i1++) {
for (i2=0;i2<4;i2++) {
for (i3=0;i3<4;i3++) {
sub_Fhkl[i1][i2][i3]= Fhkl[h_interp[i1]-h_min][k_interp[i2]-k_min][l_interp[i3]-l_min];
}
}
}
/* run the tricubic polynomial interpolation */
polin3(h_interp_d,k_interp_d,l_interp_d,sub_Fhkl,h,k,l,&F_cell);
}
if(! interpolate)
{
if ( hkls && (h0<=h_max) && (h0>=h_min) && (k0<=k_max) && (k0>=k_min) && (l0<=l_max) && (l0>=l_min) ) {
/* just take nearest-neighbor */
F_cell = Fhkl[h0-h_min][k0-k_min][l0-l_min];
}
else
{
F_cell = default_F; // usually zero
}
}
/* now we have the structure factor for this pixel */
/* convert amplitudes into intensity (photons per steradian) */
I += F_cell*F_cell*F_latt*F_latt;
/* only do this if we need to */
if(oversample_thick) I *= capture_fraction;
if(oversample_polar) I *= polar;
if(oversample_omega) I *= omega_pixel;
}
/* end of mosaic loop */
}
/* end of phi loop */
}
/* end of source loop */
}
/* end of sub-pixel y loop */
}
/* end of sub-pixel x loop */
}
/* end of detector thickness loop */
/* convert pixel intensity into photon units */
test = r_e_sqr*fluence*I/steps;
/* do the corrections now, if they haven't been applied already */
if(! oversample_thick) test *= capture_fraction;
if(! oversample_polar) test *= polar;
if(! oversample_omega) test *= omega_pixel;
floatimage[imgidx] += test;
/* now keep track of statistics */
if(floatimage[imgidx] > max_I) {
max_I = floatimage[imgidx];
max_I_x = Fdet;
max_I_y = Sdet;
}
sum += floatimage[imgidx];
sumsqr += floatimage[imgidx]*floatimage[imgidx];
++sumn;
if( printout )
{
if((fpixel==printout_fpixel && spixel==printout_spixel) || printout_fpixel < 0)
{
twotheta = atan2(sqrt(pixel_pos[2]*pixel_pos[2]+pixel_pos[3]*pixel_pos[3]),pixel_pos[1]);
test = sin(twotheta/2.0)/(lambda0*1e10);
printf("%4d %4d : stol = %g or %g\n", fpixel,spixel,stol,test);
printf("at %g %g %g\n", pixel_pos[1],pixel_pos[2],pixel_pos[3]);
printf("hkl= %f %f %f hkl0= %d %d %d\n", h,k,l,h0,k0,l0);
printf(" F_cell=%g F_latt=%g I = %g\n", F_cell,F_latt,I);
printf("I/steps %15.10g\n", I/steps);
printf("polar %15.10g\n", polar);
printf("omega %15.10g\n", omega_pixel);
printf("capfrac %15.10g\n", capture_fraction);
printf("pixel %15.10g\n", floatimage[imgidx]);
printf("real-space cell vectors (Angstrom):\n");
printf(" %-10s %-10s %-10s\n","a","b","c");
printf("X: %11.8f %11.8f %11.8f\n",a[1]*1e10,b[1]*1e10,c[1]*1e10);
printf("Y: %11.8f %11.8f %11.8f\n",a[2]*1e10,b[2]*1e10,c[2]*1e10);
printf("Z: %11.8f %11.8f %11.8f\n",a[3]*1e10,b[3]*1e10,c[3]*1e10);
}
}
else
{
if(progress_meter && progress_pixels/100 > 0)
{
if(progress_pixel % ( progress_pixels/20 ) == 0 ||
((10*progress_pixel<progress_pixels ||
10*progress_pixel>9*progress_pixels) &&
(progress_pixel % (progress_pixels/100) == 0)))
{
printf("%lu%% done\n",progress_pixel*100/progress_pixels);
fflush(stdout);
}
}
}
++progress_pixel;
}
}
printf("\n");
printf("solid angle subtended by detector = %g steradian ( %g%% sphere)\n",omega_sum/steps,100*omega_sum/steps/4/M_PI);
/* do some stats? */
if(sumn<=0) sumn=1;
avg = sum/sumn;
if(sumn<=1) sumn=2;
rms = sqrt(sumsqr/(sumn-1));
sumsqr = 0.0;
sumn = 0;
for(spixel=0;spixel<spixels;++spixel)
{
for(fpixel=0;fpixel<fpixels;++fpixel)
{
/* position in pixel array */
imgidx = spixel*fpixels+fpixel;
if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax)
{
continue;
}
test = floatimage[imgidx]-avg;
sumsqr += test*test;
++sumn;
}
}
if(sumn<=1) sumn=2;
rmsd = sqrt(sumsqr/(sumn-1));
printf("writing %s as %d %lu-byte floats\n",floatfilename,pixels,sizeof(float));
outfile = fopen(floatfilename,"wb");
if(outfile == NULL)
{
perror("ERROR: fopen");
exit(9);
}
fwrite(floatimage,sizeof(float),pixels,outfile);
fclose(outfile);
/* output as ints */
imgidx = 0;
printf("max_I = %g at %g %g\n",max_I,max_I_x,max_I_y);
printf("mean= %g rms= %g rmsd= %g\n",avg,rms,rmsd);
if(intfile_scale <= 0.0){
intfile_scale = 1.0;
if(max_I > 0.0) intfile_scale = 55000.0/max_I;
}
printf("intfile_scale = %g\n",intfile_scale);
for(spixel=0;spixel<spixels;++spixel)
{
for(fpixel=0;fpixel<fpixels;++fpixel)
{
if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax)
{
continue;
}
/* position in pixel array */
imgidx = spixel*fpixels+fpixel;
test = floatimage[imgidx] *intfile_scale+adc_offset;
if(test > 65535.0) test = 65535.0;
if(test < 0.0) test = 0.0;
intimage[imgidx] = (unsigned short int) ( floorf(test+0.5) );
// printf("%d %d = %d\n",fpixel,spixel,intimage[imgidx]);
}
}
printf("writing %s as %lu-byte integers\n",intfilename,sizeof(unsigned short int));
outfile = fopen(intfilename,"wb");
if(outfile == NULL)
{
perror("ERROR: fopen");
exit(9);
}
fprintf(outfile,"{\nHEADER_BYTES=512;\nDIM=2;\nBYTE_ORDER=%s;\nTYPE=unsigned_short;\n",byte_order);
fprintf(outfile,"SIZE1=%d;\nSIZE2=%d;\nPIXEL_SIZE=%g;\nDISTANCE=%g;\n",fpixels,spixels,pixel_size*1000.0,distance*1000.0);
fprintf(outfile,"WAVELENGTH=%g;\n",lambda0*1e10);
fprintf(outfile,"BEAM_CENTER_X=%g;\nBEAM_CENTER_Y=%g;\n",Xbeam*1000.0,Ybeam*1000);
fprintf(outfile,"ADXV_CENTER_X=%g;\nADXV_CENTER_Y=%g;\n",Fbeam*1000.0,(detsize_s-Sbeam)*1000);
fprintf(outfile,"MOSFLM_CENTER_X=%g;\nMOSFLM_CENTER_Y=%g;\n",(Sbeam-0.5*pixel_size)*1000.0,(Fbeam-0.5*pixel_size)*1000);
fprintf(outfile,"DENZO_X_BEAM=%g;\nDENZO_Y_BEAM=%g;\n",(Sbeam-0.0*pixel_size)*1000.0,(Fbeam-0.0*pixel_size)*1000);
fprintf(outfile,"DIALS_ORIGIN=%g,%g,%g\n",dials_origin[1],dials_origin[2],dials_origin[3]);
fprintf(outfile,"XDS_ORGX=%g;\nXDS_ORGY=%g;\n",ORGX,ORGY);
fprintf(outfile,"CLOSE_DISTANCE=%g;\n",close_distance*1000.0);
fprintf(outfile,"PHI=%g;\nOSC_START=%g;\nOSC_RANGE=%g;\n",phi0*RTD,phi0*RTD,osc*RTD);
fprintf(outfile,"TWOTHETA=%g;\n",detector_twotheta*RTD);
fprintf(outfile,"DETECTOR_SN=000;\n");
fprintf(outfile,"BEAMLINE=fake;\n");
fprintf(outfile,"}\f");
while ( ftell(outfile) < 512 ){ fprintf(outfile," "); };
fwrite(intimage,sizeof(unsigned short int),pixels,outfile);
fclose(outfile);
if(write_pgm)
{
/* output as pgm */
imgidx = 0;
if(pgm_scale <= 0.0){
pgm_scale = intfile_scale;
if(rmsd > 0.0) pgm_scale = 250.0/(5.0*rmsd);
}
printf("pgm_scale = %g\n",pgm_scale);
imgidx = 0;
for(spixel=0;spixel<spixels;++spixel)
{
for(fpixel=0;fpixel<fpixels;++fpixel)
{
if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax)
{
++imgidx; continue;
}
test = floatimage[imgidx] * pgm_scale;
if(test > 255.0) test = 255.0;
pgmimage[imgidx] = (unsigned char) ( test );
// printf("%d %d = %d\n",fpixel,spixel,pgmimage[imgidx]);
++imgidx;
}
}
printf("writing %s as %lu-byte integers\n",pgmfilename,sizeof(unsigned char));
outfile = fopen(pgmfilename,"wb");
if(outfile == NULL)
{
perror("ERROR: fopen");
exit(9);
}
fprintf(outfile, "P5\n%d %d\n", fpixels, spixels);
fprintf(outfile, "# pixels scaled by %lg\n", pgm_scale);
fprintf(outfile, "255\n");
fwrite(pgmimage,sizeof(unsigned char),pixels,outfile);
fclose(outfile);
}
/* quit now if there is nothing else to do */
if(calculate_noise == 0){
return 0;
}
/* simulate Poisson noise */
imgidx = 0;
sum = 0.0;
overloads = 0;
for(spixel=0;spixel<spixels;++spixel)
{
for(fpixel=0;fpixel<fpixels;++fpixel)
{
if(fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax)
{
++imgidx; continue;
}
test = poidev( floatimage[imgidx], &seed );
sum += test;
test += adc_offset;
if(test > 65535.0)
{
test = 65535.0;
++overloads;
}
intimage[imgidx] = (unsigned short int) test;
// printf("%d %d = %d\n",fpixel,spixel,intimage[imgidx]);
++imgidx;
}
}
printf("%.0f photons on noise image (%d overloads)\n",sum,overloads);
printf("writing %s as %lu-byte integers\n",noisefilename,sizeof(unsigned short int));
outfile = fopen(noisefilename,"wb");
if(outfile == NULL)
{
perror("ERROR: fopen");
exit(9);
}
fprintf(outfile,"{\nHEADER_BYTES=512;\nDIM=2;\nBYTE_ORDER=%s;\nTYPE=unsigned_short;\n",byte_order);
fprintf(outfile,"SIZE1=%d;\nSIZE2=%d;\nPIXEL_SIZE=%g;\nDISTANCE=%g;\n",fpixels,spixels,pixel_size*1000.0,distance*1000.0);
fprintf(outfile,"WAVELENGTH=%g;\n",lambda0*1e10);
fprintf(outfile,"BEAM_CENTER_X=%g;\nBEAM_CENTER_Y=%g;\n",Xbeam*1000.0,Ybeam*1000);
fprintf(outfile,"ADXV_CENTER_X=%g;\nADXV_CENTER_Y=%g;\n",Fbeam*1000.0,(detsize_s-Sbeam)*1000);
fprintf(outfile,"MOSFLM_CENTER_X=%g;\nMOSFLM_CENTER_Y=%g;\n",(Sbeam-0.5*pixel_size)*1000.0,(Fbeam-0.5*pixel_size)*1000);
fprintf(outfile,"DENZO_X_BEAM=%g;\nDENZO_Y_BEAM=%g;\n",(Sbeam+0.0*pixel_size)*1000.0,(Fbeam+0.0*pixel_size)*1000);
fprintf(outfile,"DIALS_ORIGIN=%g,%g,%g\n",dials_origin[1],dials_origin[2],dials_origin[3]);
fprintf(outfile,"XDS_ORGX=%g;\nXDS_ORGY=%g;\n",ORGX,ORGY);
fprintf(outfile,"CLOSE_DISTANCE=%g;\n",close_distance*1000.0);
fprintf(outfile,"PHI=%g;\nOSC_START=%g;\nOSC_RANGE=%g;\n",phi0*RTD,phi0*RTD,osc*RTD);
fprintf(outfile,"TWOTHETA=%g;\n",detector_twotheta*RTD);
fprintf(outfile,"DETECTOR_SN=000;\n");
fprintf(outfile,"BEAMLINE=fake;\n");
fprintf(outfile,"}\f");
while ( ftell(outfile) < 512 ){ fprintf(outfile," "); };
fwrite(intimage,sizeof(unsigned short int),pixels,outfile);
fclose(outfile);
return 0;
}
/* Fourier transform of a grating */
double sincg(double x,double N) {
if(x==0.0) return N;
return sin(x*N)/sin(x);
}
/* Fourier transform of a sphere */
double sinc3(double x) {
if(x==0.0) return 1.0;
return 3.0*(sin(x)/x-cos(x))/(x*x);
}
double sinc_conv_sinc3(double x) {
if(x==0.0) return 1.0;
return 3.0*(sin(x)-x*cos(x))/(x*x*x);
}
double *rotate(double *v, double *newv, double phix, double phiy, double phiz) {
double rxx,rxy,rxz,ryx,ryy,ryz,rzx,rzy,rzz;
double new_x,new_y,new_z,rotated_x,rotated_y,rotated_z;
new_x=v[1];
new_y=v[2];
new_z=v[3];
if(phix != 0){
/* rotate around x axis */
//rxx= 1; rxy= 0; rxz= 0;
ryx= 0; ryy= cos(phix); ryz=-sin(phix);
rzx= 0; rzy= sin(phix); rzz= cos(phix);
rotated_x = new_x;
rotated_y = new_y*ryy + new_z*ryz;
rotated_z = new_y*rzy + new_z*rzz;
new_x = rotated_x; new_y = rotated_y; new_z = rotated_z;
}
if(phiy != 0) {
/* rotate around y axis */
rxx= cos(phiy); rxy= 0; rxz= sin(phiy);
//ryx= 0; ryy= 1; ryz= 0;
rzx=-sin(phiy); rzy= 0; rzz= cos(phiy);
rotated_x = new_x*rxx + new_y*rxy + new_z*rxz;
rotated_y = new_y;
rotated_z = new_x*rzx + new_y*rzy + new_z*rzz;
new_x = rotated_x; new_y = rotated_y; new_z = rotated_z;
}
if(phiz != 0){
/* rotate around z axis */
rxx= cos(phiz); rxy=-sin(phiz); rxz= 0;
ryx= sin(phiz); ryy= cos(phiz); ryz= 0;
//rzx= 0; rzy= 0; rzz= 1;
rotated_x = new_x*rxx + new_y*rxy ;
rotated_y = new_x*ryx + new_y*ryy;
rotated_z = new_z;
new_x = rotated_x; new_y = rotated_y; new_z = rotated_z;
}
newv[1]=new_x;
newv[2]=new_y;
newv[3]=new_z;
return newv;
}
/* rotate a point about a unit vector axis */
double *rotate_axis(double *v, double *newv, double *axis, double phi) {
double sinphi = sin(phi);
double cosphi = cos(phi);
double dot = (axis[1]*v[1]+axis[2]*v[2]+axis[3]*v[3])*(1.0-cosphi);
double temp[4];
temp[1] = axis[1]*dot+v[1]*cosphi+(-axis[3]*v[2]+axis[2]*v[3])*sinphi;
temp[2] = axis[2]*dot+v[2]*cosphi+(+axis[3]*v[1]-axis[1]*v[3])*sinphi;
temp[3] = axis[3]*dot+v[3]*cosphi+(-axis[2]*v[1]+axis[1]*v[2])*sinphi;
newv[1]=temp[1]; newv[2]=temp[2]; newv[3]=temp[3];
return newv;
}
/* rotate a vector using a 9-element unitary matrix */
/* Apply a 3x3 rotation matrix (row-major in umat[0..8]) to 1-indexed vector
 * v[1..3]; result goes into newv[1..3], which is also returned. */
double *rotate_umat(double *v, double *newv, double umat[9])
{
    newv[1] = umat[0] * v[1] + umat[1] * v[2] + umat[2] * v[3];
    newv[2] = umat[3] * v[1] + umat[4] * v[2] + umat[5] * v[3];
    newv[3] = umat[6] * v[1] + umat[7] * v[2] + umat[8] * v[3];
    return newv;
}
/* returns a unit vector in a random direction in arguments dx,dy,dz */
/* also returns a random magnitude within the unit sphere as a return value */
/* Fill *dx,*dy,*dz with a random unit vector (isotropic direction) and return
   the pre-normalization radius, which is uniform-ish within the shell
   0.01 < r <= 1 (rejection-sampled from a cube slightly larger than the
   unit sphere; tiny radii are rejected to avoid division blow-up). */
float uniform3Ddev(float *dx, float *dy, float *dz, long *seed)
{
float ran1(long *idum);
float dr;
/* pick a random direction by cutting a sphere out of a cube */
dr = 0;
/* rejection loop: candidates span [-1.05,1.05] per axis, keep 0.01 < r <= 1 */
while(dr>1 || dr < 1e-2)
{
*dx = 2.1*(ran1(seed)-0.5);
*dy = 2.1*(ran1(seed)-0.5);
*dz = 2.1*(ran1(seed)-0.5);
dr = sqrt(*dx**dx+*dy**dy+*dz**dz);
}
/* turn this into a unit vector */
*dx/=dr;
*dy/=dr;
*dz/=dr;
/* dx,dy,dz should now be a random unit vector */
return dr;
}
/* returns a 9-element unitary matrix for a random isotropic rotation on a spherical cap of diameter "mosaicity" */
/* mosaic = 90 deg is a full sphere */
/* Fill umat[0..8] (row-major) with a rotation matrix for a random isotropic
   rotation whose angle is bounded by "mosaicity" (radians); mosaicity = 90 deg
   covers the full sphere.  A random axis is drawn uniformly on the sphere and
   the rotation angle is scaled by (1-r3^2)^(1/3).  Returns umat. */
double *mosaic_rotation_umat(float mosaicity, double umat[9], long *seed)
{
float ran1(long *idum);
double r1,r2,r3,xyrad,rot;
double v1,v2,v3;
double t1,t2,t3,t6,t7,t8,t9,t11,t12,t15,t19,t20,t24;
double uxx,uxy,uxz,uyx,uyy,uyz,uzx,uzy,uzz;
/* make three random uniform deviates on [-1:1] */
r1= (double) 2.0*ran1(seed)-1.0;
r2= (double) 2.0*ran1(seed)-1.0;
r3= (double) 2.0*ran1(seed)-1.0;
/* (v1,v2,v3) is a uniformly distributed unit axis built from r1,r2 */
xyrad = sqrt(1.0-r2*r2);
rot = mosaicity*powf((1.0-r3*r3),(1.0/3.0));
v1 = xyrad*sin(M_PI*r1);
v2 = xyrad*cos(M_PI*r1);
v3 = r2;
/* commence incomprehensible quaternion calculation */
/* (axis-angle to rotation matrix expansion; t* are shared subexpressions) */
t1 = cos(rot);
t2 = 1.0 - t1;
t3 = v1*v1;
t6 = t2*v1;
t7 = t6*v2;
t8 = sin(rot);
t9 = t8*v3;
t11 = t6*v3;
t12 = t8*v2;
t15 = v2*v2;
t19 = t2*v2*v3;
t20 = t8*v1;
t24 = v3*v3;
/* populate the unitary rotation matrix */
umat[0] = uxx = t1 + t2*t3;
umat[1] = uxy = t7 - t9;
umat[2] = uxz = t11 + t12;
umat[3] = uyx = t7 + t9;
umat[4] = uyy = t1 + t2*t15;
umat[5] = uyz = t19 - t20;
umat[6] = uzx = t11 - t12;
umat[7] = uzy = t19 + t20;
umat[8] = uzz = t1 + t2*t24;
/* return pointer to the provided array, in case that is useful */
return umat;
}
/* convert a unitary rotation matrix into misseting angles
rotx roty rotz are returned as missets[1] missets[2] missets[3] */
/* Convert a (nearly) unitary 3x3 rotation matrix umat[0..8] into missetting
   angles: rotx, roty, rotz are returned in missets[1..3] (radians).  Before
   extracting angles the matrix is repaired: rows are re-normalized, zero rows
   are reconstructed from cross products of the other two, and the whole
   procedure is applied twice so the result is orthonormal even if the input
   was degenerate.  Returns missets. */
double *umat2misset(double umat[9],double *missets)
{
double uxx,uxy,uxz,uyx,uyy,uyz,uzx,uzy,uzz;
double m,mx,my,mz;
double xcy_x,xcy_y,xcy_z;
double ycz_x,ycz_y,ycz_z;
double zcx_x,zcx_y,zcx_z;
double rotx,roty,rotz;
uxx=umat[0];uxy=umat[1];uxz=umat[2];
uyx=umat[3];uyy=umat[4];uyz=umat[5];
uzx=umat[6];uzy=umat[7];uzz=umat[8];
/* or transpose? */
// uxx=umat[1];uyx=umat[2];uzx=umat[3];
// uxy=umat[4];uyy=umat[5];uzy=umat[6];
// uxz=umat[7];uyz=umat[8];uzz=umat[9];
/* make sure it is unitary: normalize each row */
mx = sqrt(uxx*uxx+uxy*uxy+uxz*uxz);
my = sqrt(uyx*uyx+uyy*uyy+uyz*uyz);
mz = sqrt(uzx*uzx+uzy*uzy+uzz*uzz);
if(mx>0){uxx/=mx;uxy/=mx;uxz/=mx;};
if(my>0){uyx/=my;uyy/=my;uyz/=my;};
if(mz>0){uzx/=mz;uzy/=mz;uzz/=mz;};
/* if two rows vanished, replace them with canonical basis vectors */
if(mx>=0 && my<=0 && mz<=0)
{
uyx=0;uyy=1;uyz=0;
uzx=0;uzy=0;uzz=1;
}
if(mx<=0 && my>=0 && mz<=0)
{
uxx=1;uxy=0;uxz=0;
uzx=0;uzy=0;uzz=1;
}
if(mx<=0 && my<=0 && mz>=0)
{
uxx=1;uxy=0;uxz=0;
uyx=0;uyy=1;uyz=0;
}
/* cross products to check normality */
xcy_x = uxy*uyz - uxz*uyy;
xcy_y = uxz*uyx - uxx*uyz;
xcy_z = uxx*uyy - uxy*uyx;
m=sqrt(xcy_x*xcy_x+xcy_y*xcy_y+xcy_z*xcy_z);
if(m>0){xcy_x/=m;xcy_y/=m;xcy_z/=m;};
ycz_x = uyy*uzz - uyz*uzy;
ycz_y = uyz*uzx - uyx*uzz;
ycz_z = uyx*uzy - uyy*uzx;
m=sqrt(ycz_x*ycz_x+ycz_y*ycz_y+ycz_z*ycz_z);
if(m>0){ycz_x/=m;ycz_y/=m;ycz_z/=m;};
zcx_x = uzy*uxz - uzz*uxy;
zcx_y = uzz*uxx - uzx*uxz;
zcx_z = uzx*uxy - uzy*uxx;
m=sqrt(zcx_x*zcx_x+zcx_y*zcx_y+zcx_z*zcx_z);
if(m>0){zcx_x/=m;zcx_y/=m;zcx_z/=m;};
/* substitute any empty vectors for cross-product of other two */
if(mx<=0){uxx=ycz_x;uxy=ycz_y;uxz=ycz_z;};
if(my<=0){uyx=zcx_x;uyy=zcx_y;uyz=zcx_z;};
if(mz<=0){uzx=xcy_x;uzy=xcy_y;uzz=xcy_z;};
/* second pass: cross products to check normality */
xcy_x = uxy*uyz - uxz*uyy;
xcy_y = uxz*uyx - uxx*uyz;
xcy_z = uxx*uyy - uxy*uyx;
m=sqrt(xcy_x*xcy_x+xcy_y*xcy_y+xcy_z*xcy_z);
if(m>0){xcy_x/=m;xcy_y/=m;xcy_z/=m;}
ycz_x = uyy*uzz - uyz*uzy;
ycz_y = uyz*uzx - uyx*uzz;
ycz_z = uyx*uzy - uyy*uzx;
m=sqrt(ycz_x*ycz_x+ycz_y*ycz_y+ycz_z*ycz_z);
if(m>0){ycz_x/=m;ycz_y/=m;ycz_z/=m;};
zcx_x = uzy*uxz - uzz*uxy;
zcx_y = uzz*uxx - uzx*uxz;
zcx_z = uzx*uxy - uzy*uxx;
m=sqrt(zcx_x*zcx_x+zcx_y*zcx_y+zcx_z*zcx_z);
if(m>0){zcx_x/=m;zcx_y/=m;zcx_z/=m;};
/* substitute any empty vectors for cross-product of other two */
if(mx<=0){uxx=ycz_x;uxy=ycz_y;uxz=ycz_z;};
if(my<=0){uyx=zcx_x;uyy=zcx_y;uyz=zcx_z;};
if(mz<=0){uzx=xcy_x;uzy=xcy_y;uzz=xcy_z;};
/* make sure it is unitary */
mx = sqrt(uxx*uxx+uxy*uxy+uxz*uxz);
my = sqrt(uyx*uyx+uyy*uyy+uyz*uyz);
mz = sqrt(uzx*uzx+uzy*uzy+uzz*uzz);
if(mx>0){uxx/=mx;uxy/=mx;uxz/=mx;};
if(my>0){uyx/=my;uyy/=my;uyz/=my;};
if(mz>0){uzx/=mz;uzy/=mz;uzz/=mz;};
/* see if its really orthonormal? */
/* extract Euler-style angles; gimbal-lock branch when |uzx| >= 1 */
if(uzx*uzx < 1.0)
{
rotx = atan2(uzy,uzz);
roty = atan2(-uzx,sqrt(uzy*uzy+uzz*uzz));
rotz = atan2(uyx,uxx);
}
else
{
/* atan2(1,1)*4 == pi, atan2(1,1)*2 == pi/2 */
rotx = atan2(1,1)*4;
roty = atan2(1,1)*2;
rotz = atan2(uxy,-uyy);
}
missets[1] = rotx;
missets[2] = roty;
missets[3] = rotz;
return missets;
}
/* Poisson deviate with mean xm (Numerical Recipes "poidev", float version).
   Uses the direct product-of-uniforms method for xm < 12, a Lorentzian
   rejection method for larger means, and a Gaussian approximation above 1e6.
   NOTE: caches values in static variables keyed on xm, so it is not
   thread-safe. */
float poidev(float xm, long *idum)
{
float gammln(float xx);
float ran1(long *idum);
/* oldm is a flag for whether xm has changed since last call */
static float sq,alxm,g,oldm=(-1.0);
float em,t,y;
/* routine below locks up for > 1e6 photons? */
/* so use the Gaussian approximation, valid for such large means */
if (xm > 1.0e6) {
return xm+sqrt(xm)*gaussdev(idum);
}
if (xm < 12.0) {
/* use direct method: simulate exponential delays between events */
if(xm != oldm) {
/* xm is new, compute the exponential */
oldm=xm;
g=exp(-xm);
}
/* adding exponential deviates is equivalent to multiplying uniform deviates */
/* final comparison is to the pre-computed exponential */
em = -1;
t = 1.0;
do {
++em;
t *= ran1(idum);
} while (t > g);
} else {
/* Use rejection method */
if(xm != oldm) {
/* xm has changed, pre-compute a few things... */
oldm=xm;
sq=sqrt(2.0*xm);
alxm=log(xm);
g=xm*alxm-gammln(xm+1.0);
}
do {
do {
/* y is a deviate from a lorentzian comparison function */
y=tan(M_PI*ran1(idum));
/* shift and scale */
em=sq*y+xm;
} while (em < 0.0); /* there are no negative Poisson deviates */
/* round off to nearest integer */
em=floor(em);
/* ratio of Poisson distribution to comparison function */
/* scale it back by 0.9 to make sure t is never > 1.0 */
t=0.9*(1.0+y*y)*exp(em*alxm-gammln(em+1.0)-g);
} while (ran1(idum) > t);
}
return em;
}
/* return gaussian deviate with rms=1 and FWHM = 2/sqrt(log(2)) */
/* Gaussian deviate with zero mean and rms = 1 (Box-Muller / NR "gasdev").
   Generates deviates in pairs; one is returned, the spare is cached in the
   static gset for the next call.  NOTE: static state -> not thread-safe. */
float gaussdev(long *idum)
{
float ran1(long *idum);
static int iset=0;
static float gset;
float fac,rsq,v1,v2;
if (iset == 0) {
/* no extra deviats handy ... */
/* so pick two uniform deviates on [-1:1] */
do {
v1=2.0*ran1(idum)-1.0;
v2=2.0*ran1(idum)-1.0;
rsq=v1*v1+v2*v2;
} while (rsq >= 1.0 || rsq == 0);
/* restrained to the unit circle */
/* apply Box-Muller transformation to convert to a normal deviate */
fac=sqrt(-2.0*log(rsq)/rsq);
gset=v1*fac;
iset=1; /* we now have a spare deviate */
return v2*fac;
} else {
/* there is an extra deviate in gset */
iset=0;
return gset;
}
}
/* generate Lorentzian deviate with FWHM = 2 */
/* Lorentzian (Cauchy) deviate with FWHM = 2: inverse-CDF of a uniform deviate */
float lorentzdev(long *seed)
{
    float ran1(long *idum);
    float u = ran1(seed);
    return tan(M_PI * (u - 0.5));
}
/* return triangular deviate with FWHM = 1 */
/* Triangular deviate with FWHM = 1, generated by inverse-CDF of a uniform
 * deviate; output spans [-1,1] with the peak at 0. */
float triangledev(long *seed)
{
    float ran1(long *idum);
    float u = ran1(seed);
    return (u > 0.5) ? (float)(sqrt(2 * (u - 0.5)) - 1)
                     : (float)(1 - sqrt(2 * u));
}
/* Exponential deviate with unit mean: -log(U) for uniform U in (0,1].
 * U == 0 is rejected so log() stays finite. */
float expdev(long *idum)
{
    /* declare ran1 explicitly, as the sibling deviate functions do; without
       this the call relied on an implicit (int) declaration that mismatches
       ran1's float return type */
    float ran1(long *idum);
    float dum;
    do
        dum = ran1(idum);
    while (dum == 0.0);
    return -log(dum);
}
/* ln of the gamma function */
float gammln(float xx)
{
double x,y,tmp,ser;
static double cof[6]={76.18009172947146,-86.50532032941677,
24.01409824083091,-1.231739572450155,
0.1208650973866179e-2,-0.5395239384953e-5};
int j;
y=x=xx;
tmp=x+5.5;
tmp -= (x+0.5)*log(tmp);
ser = 1.000000000190015;
for(j=0;j<=5;++j) ser += cof[j]/++y;
return -tmp+log(2.5066282746310005*ser/x);
}
/* returns a uniform random deviate between 0 and 1 */
/* "Minimal" Park-Miller linear congruential generator with a Bays-Durham
   shuffle (Numerical Recipes "ran1").  Returns a uniform deviate strictly
   inside (0,1) — the value is clamped to RNMX at the top end.  Initialize by
   calling with *idum negative; do not alter *idum between calls.
   NOTE: uses static state, so it is not thread-safe. */
#define IA 16807
#define IM 2147483647
#define AM (1.0/IM)
#define IQ 127773
#define IR 2836
#define NTAB 32
#define NDIV (1+(IM-1)/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)
float ran1(long *idum)
{
int j;
long k;
/* iy and iv[] carry the shuffle-table state between calls */
static long iy=0;
static long iv[NTAB];
float temp;
if (*idum <= 0 || !iy) {
/* first time around. don't want idum=0 */
if(-(*idum) < 1) *idum=1;
else *idum = -(*idum);
/* load the shuffle table */
for(j=NTAB+7;j>=0;j--) {
k=(*idum)/IQ;
*idum=IA*(*idum-k*IQ)-IR*k;
if(*idum < 0) *idum += IM;
if(j < NTAB) iv[j] = *idum;
}
iy=iv[0];
}
/* always start here after initializing */
/* Schrage's trick: IA*(*idum) mod IM without 32-bit overflow */
k=(*idum)/IQ;
*idum=IA*(*idum-k*IQ)-IR*k;
if (*idum < 0) *idum += IM;
/* feed the new value through the shuffle table to break serial correlations */
j=iy/NDIV;
iy=iv[j];
iv[j] = *idum;
if((temp=AM*iy) > RNMX) return RNMX;
else return temp;
}
/* Third-order (4-point) Lagrange interpolation: evaluates the unique cubic
 * through (xa[0..3], ya[0..3]) at x and stores the result in *y.
 * Assumes the four abscissas are distinct. */
void polint(double *xa, double *ya, double x, double *y)
{
    double sum = 0.0;
    int i, k;

    for (i = 0; i < 4; i++) {
        double num = ya[i];
        double den = 1.0;
        for (k = 0; k < 4; k++) {
            if (k == i) {
                continue;
            }
            num *= x - xa[k];
            den *= xa[i] - xa[k];
        }
        sum += num / den;
    }
    *y = sum;
}
/* Bicubic interpolation on a 4x4 grid: interpolate each row of ya along the
 * x2 axis, then interpolate those four results along x1.  Result in *y. */
void polin2(double *x1a, double *x2a, double **ya, double x1, double x2, double *y)
{
    void polint(double *xa, double *ya, double x, double *y);
    double col[4];
    int k;

    for (k = 0; k < 4; k++) {
        polint(x2a, ya[k], x2, &col[k]);
    }
    polint(x1a, col, x1, y);
}
/* Tricubic interpolation on a 4x4x4 grid: bicubic-interpolate each x1 slab
 * in the (x2,x3) plane, then cubic-interpolate the four results along x1.
 * Result in *y.  (The unused polin1 prototype has been removed.) */
void polin3(double *x1a, double *x2a, double *x3a, double ***ya, double x1,
double x2, double x3, double *y)
{
    void polint(double *xa, double ya[], double x, double *y);
    void polin2(double *x1a, double *x2a, double **ya, double x1, double x2, double *y);
    int j;
    double ymtmp[4];

    for (j = 1; j <= 4; j++) {
        /* collapse slab j-1 to a single value at (x2,x3) */
        polin2(x2a, x3a, &ya[j-1][0], x2, x3, &ymtmp[j-1]);
    }
    polint(x1a, ymtmp, x1, y);
}
/* FWHM = integral = 1 */
double ngauss2D(double x,double y)
{
return log(16.)/M_PI*exp(-log(16.)*(x*x+y*y));
}
double ngauss2Dinteg(double x,double y)
{
return 0.125*(erf(2.*x*sqrt(log(2.)))*erf(y*sqrt(log(16.)))*sqrt(log(16.)/log(2.)));
}
/* read in multi-column text file to list of double arrays */
/* provide address of undeclared arrays on command line */
/* Read a whitespace/comma-delimited multi-column text file into nargs
   newly-allocated double arrays, one per column.  Each vararg is a
   double** that receives a malloc'd array (caller owns/frees it) of
   length (number of non-blank lines + 10), pre-filled with NAN so missing
   trailing columns stay flagged.  Returns the number of non-blank lines,
   or 0 if the file could not be opened (arrays are not allocated then). */
size_t read_text_file(char *filename, size_t nargs, ... )
{
/* maximum of 10240-character lines? */
char text[10240];
char *token;
const char delimiters[] = " \t,;:!";
const char numberstuf[] = "0123456789-+.EGeg";
unsigned long line,lines;
unsigned long i,j;
double value;
double *data;
double **pointer;
va_list arglist;
FILE *infile = NULL;
infile = fopen(filename,"r");
if(infile == NULL) {
perror("fopen()");
return 0;
}
/* first pass: count non-blank lines so arrays can be sized up front */
lines=0;
while ( fgets ( text, sizeof text, infile ) != NULL ) {
token = text;
token += strspn(token,delimiters);
if(strcmp(token,"\n")==0) {
//printf("blank\n");
continue;
}
++lines;
}
rewind(infile);
/* allocate memory for arrays */
va_start( arglist, nargs);
for(i=0;i<nargs;++i){
/* allocate the array */
data = (double*) malloc((lines+10)*sizeof(double));
/* initialize with missing number flags */
for(j=0;j<lines+10;++j) {
data[j] = NAN;
}
/* get argument (pointer to pointer) */
pointer = va_arg(arglist, double **);
/* change the value of what the arg points to */
*pointer = data;
/* now the pointer provided as an argument points to
something */
}
va_end(arglist);
/* second pass: parse up to nargs numeric tokens from each non-blank line */
line = 0;
while ( fgets ( text, sizeof text, infile ) != NULL ) { /* read a line */
token = text;
token += strspn(token,delimiters);
if(strcmp(token,"\n")==0) {
//printf("blank\n");
continue;
}
i=0;
va_start( arglist, nargs);
do
{
value=atof(token);
/* get argument */
pointer = va_arg(arglist, double **);
/* retrieve data array's address */
data = *pointer;
data[line] = value;
/* advance past the number and the following delimiters; a bare
   "\n" makes the loop condition fail and ends the line */
token += strspn(token,numberstuf);
if (strcmp(token,"\n")==0) continue;
token += strcspn(token,delimiters);
token += strspn(token,delimiters);
if (strcmp(token,"\n")==0) continue;
++i;
if(i>=nargs) {
break;
}
}
while (strcmp(token,"\n")!=0) ;
va_end(arglist);
// printf("initializing:");
// va_start( arglist, nargs);
// for(i=0;i<nargs;++i){
// pointer = va_arg(arglist, double **);
// data = *pointer;
// printf(" %g",data[line]);
// }
// va_end(arglist);
// printf("\n");
++line;
}
fclose(infile);
return lines;
}
/* measure magnitude of provided vector */
double magnitude(double *vector) {
/* measure the magnitude */
vector[0] = sqrt(vector[1]*vector[1]+vector[2]*vector[2]+vector[3]*vector[3]);
return vector[0];
}
/* make provided vector a unit vector */
/* Normalize 1-indexed vector[1..3] into new_unit_vector[1..3]; returns the
 * original magnitude.  A zero vector cannot be normalized: all four elements
 * of the output (including the magnitude slot [0]) are zeroed instead.
 * NOTE: magnitude() also writes vector[0] as a side effect. */
double unitize(double *vector, double *new_unit_vector)
{
    double mag = magnitude(vector);

    if (mag == 0.0) {
        /* can't normalize, report zero vector */
        new_unit_vector[0] = 0.0;
        new_unit_vector[1] = 0.0;
        new_unit_vector[2] = 0.0;
        new_unit_vector[3] = 0.0;
        return mag;
    }
    new_unit_vector[1] = vector[1] / mag;
    new_unit_vector[2] = vector[2] / mag;
    new_unit_vector[3] = vector[3] / mag;
    return mag;
}
/* scale magnitude of provided vector */
/* Multiply 1-indexed vector[1..3] by scale into new_vector[1..3]; returns the
 * new magnitude (which magnitude() also stores in new_vector[0]). */
double vector_scale(double *vector, double *new_vector, double scale)
{
    int i;
    for (i = 1; i <= 3; i++) {
        new_vector[i] = scale * vector[i];
    }
    return magnitude(new_vector);
}
/* enforce magnitude of provided vector */
/* Rescale 1-indexed vector[1..3] to the requested magnitude, writing into
 * new_vector[1..3]; returns the resulting magnitude.  A zero-length input is
 * treated as having magnitude 1 so the components pass through scaled by
 * new_magnitude (avoids division by zero). */
double vector_rescale(double *vector, double *new_vector, double new_magnitude)
{
    double oldmag = magnitude(vector);
    int i;

    if (oldmag <= 0.0) {
        oldmag = 1.0;
    }
    for (i = 1; i <= 3; i++) {
        new_vector[i] = new_magnitude / oldmag * vector[i];
    }
    return magnitude(new_vector);
}
/* difference between two given vectors */
/* Component-wise difference of two 1-indexed vectors:
 * new_vector = vector - origin_vector; returns its magnitude
 * (which magnitude() also stores in new_vector[0]). */
double vector_diff(double *vector, double *origin_vector, double *new_vector)
{
    int i;
    for (i = 1; i <= 3; i++) {
        new_vector[i] = vector[i] - origin_vector[i];
    }
    return magnitude(new_vector);
}
/* vector cross product where vector magnitude is 0th element */
/* Cross product z = x × y for 1-indexed 3-vectors; the magnitude slot z[0]
 * is reset to 0 (not computed).  Returns z. */
double *cross_product(double *x, double *y, double *z)
{
    double cx = x[2] * y[3] - x[3] * y[2];
    double cy = x[3] * y[1] - x[1] * y[3];
    double cz = x[1] * y[2] - x[2] * y[1];

    z[1] = cx;
    z[2] = cy;
    z[3] = cz;
    z[0] = 0.0;
    return z;
}
/* vector inner product where vector magnitude is 0th element */
/* Inner product of two 1-indexed 3-vectors (element [0] is ignored) */
double dot_product(double *x, double *y)
{
    double sum = 0.0;
    int i;
    for (i = 1; i <= 3; i++) {
        sum += x[i] * y[i];
    }
    return sum;
}
/* polarization factor */
/* Polarization correction factor for a scattered ray.
   kahn_factor: degree of polarization (0 = unpolarized); incident and
   diffracted are 1-indexed beam direction vectors; axis is the canonical
   polarization axis.  NOTE: all three vectors are normalized IN PLACE.
   Returns the multiplicative intensity correction
   0.5*(1 + cos^2(2theta) - K*cos(2psi)*sin^2(2theta)). */
double polarization_factor(double kahn_factor, double *incident, double *diffracted, double *axis)
{
double cos2theta,cos2theta_sqr,sin2theta_sqr;
double psi=0;
double E_in[4];
double B_in[4];
double E_out[4];
double B_out[4];
/* normalize in place so the dot products below give direction cosines */
unitize(incident,incident);
unitize(diffracted,diffracted);
unitize(axis,axis);
/* component of diffracted unit vector along incident beam unit vector */
cos2theta = dot_product(incident,diffracted);
cos2theta_sqr = cos2theta*cos2theta;
sin2theta_sqr = 1-cos2theta_sqr;
if(kahn_factor != 0.0){
/* tricky bit here is deciding which direciton the E-vector lies in for each source
here we assume it is closest to the "axis" defined above */
/* cross product to get "vertical" axis that is orthogonal to the cannonical "polarization" */
cross_product(axis,incident,B_in);
/* make it a unit vector */
unitize(B_in,B_in);
/* cross product with incident beam to get E-vector direction */
cross_product(incident,B_in,E_in);
/* make it a unit vector */
unitize(E_in,E_in);
/* get components of diffracted ray projected onto the E-B plane */
/* (scalar projections are stashed in element [0] of E_out/B_out) */
E_out[0] = dot_product(diffracted,E_in);
B_out[0] = dot_product(diffracted,B_in);
/* compute the angle of the diffracted ray projected onto the incident E-B plane */
psi = -atan2(B_out[0],E_out[0]);
}
/* correction for polarized incident beam */
return 0.5*(1.0 + cos2theta_sqr - kahn_factor*cos(2*psi)*sin2theta_sqr);
}
/* Detect the byte order of the running machine by storing a known 16-bit
 * pattern and inspecting which byte lands first.  Returns the static string
 * "big_endian" or "little_endian"; callers must not free or modify it. */
char *get_byte_order()
{
    typedef union
    {
        unsigned char string[2];
        unsigned short integer;
    } TWOBYTES;
    TWOBYTES probe;
    static char *byte_order;

    /* 24954 == 0x617A, i.e. the bytes 'a','z' when stored big-endian first */
    probe.integer = 24954;
    if (strncmp((const char *) probe.string, "az", 2) == 0)
    {
        byte_order = "big_endian";
    }
    else
    {
        byte_order = "little_endian";
    }
    return byte_order;
}
/* Open an SMV/ADSC image file and load its header and pixel data.
   On success the returned SMVinfo has handle, header, filename, width,
   height, swap_bytes and mmapdata populated; on any failure handle is NULL
   (and header_size is 0 if fopen itself failed).  Pixel data is assumed to
   be 16-bit unsigned — presumably TYPE=unsigned_short; verify for other
   detectors.  Caller owns the allocated header/filename/mmapdata buffers. */
SMVinfo GetFrame(char *filename)
{
char *string;
SMVinfo frame;
char *byte_order = get_byte_order();
// unsigned short int tempint;
/* try to open the file... */
frame.handle = fopen(filename, "rb");
if(frame.handle != NULL)
{
/* just assume header will be 512 bytes?... */
frame.header = (char *) calloc(1024,sizeof(char));
if(! fread(frame.header, 512, 1, frame.handle))
{
perror("SMV file header");
exit(9);
}
/* NUL-terminate so strstr/atof on the header are safe */
string = frame.header + 512;
*string = (char) 0;
/* remember the file name */
frame.filename = (char *) calloc(strlen(filename)+10,sizeof(char));
strcpy(frame.filename,filename);
/* What kind of file is this? */
/* NOTE: only the first 12 characters ("{\nHEADER_BYT") are compared */
if(0!=strncmp(frame.header, "{\nHEADER_BYTES= 512;\nDIM=2;\nBYTE_ORDER=", 12))
{
/* probably not an ADSC frame */
/* inform the user */
printf("ERROR: %s does not look like an ADSC frame!\n", filename);
/* skip this file */
fclose(frame.handle);
frame.handle = NULL;
}
else
{
/* store the full header */
/* re-read if the header is longer than the assumed 512 bytes */
frame.header_size = (int) ValueOf("HEADER_BYTES",frame);
if(frame.header_size != 512)
{
free(frame.header);
fseek(frame.handle,0,SEEK_SET);
frame.header = (char *) calloc(2*frame.header_size,sizeof(char));
if(! fread(frame.header, frame.header_size, 1, frame.handle))
{
perror("SMV file fread");
exit(9);
}
string = frame.header + frame.header_size;
*string = (char) 0;
}
/* see if we will need to swap bytes */
string = (char *) strstr(frame.header, "BYTE_ORDER=")+11;
/* find last instance of keyword in the header */
while ((char *) strstr(string, "BYTE_ORDER=") != NULL)
{
string = (char *) strstr(string, "BYTE_ORDER=")+11;
}
/* swap if the file's byte order differs from this machine's */
if(0==strncmp(byte_order, string, 10))
{
frame.swap_bytes = FALSE;
}
else
{
frame.swap_bytes = TRUE;
}
/* store a couple of things */
frame.width = (int) ValueOf("SIZE1",frame);
frame.height = (int) ValueOf("SIZE2",frame);
if(frame.width == 0)
{
/* try other formats? */
frame.height = (int) ValueOf("DETECTOR_DIMENSIONS",frame);
/* NOTE(review): original line assigns both width and height below */
frame.width = frame.height;
}
// frame.mmapdata = mmap(NULL,2*frame.width*frame.height+frame.header_size,PROT_READ,MAP_SHARED,fileno(frame.handle),0);
/* read header + 2-byte pixels into one heap buffer (mmap replacement) */
frame.mmapdata = (unsigned short int *) calloc(2,frame.width*frame.height+frame.header_size);
if(frame.mmapdata == NULL)
{
perror("calloc:");
exit(9);
}
fseek(frame.handle,0,SEEK_SET);
printf("reading %s\n",frame.filename);
if(! fread(frame.mmapdata,1,2*frame.width*frame.height+frame.header_size,frame.handle))
{
perror("SMV file fread");
exit(9);
}
printf("mmap(%s) = %p\n",frame.filename,frame.mmapdata);
}
}
else
{
/* fopen() failed */
perror(filename);
frame.header_size=0;
}
return frame;
}
/* read floating-point values from keywords in an SMV header */
/* Read the floating-point value of a keyword from an SMV header.
   Scans for the LAST occurrence of keyword in frame.header, then parses the
   number after the next "=" sign with atof.  Returns NAN if the keyword is
   absent and 0.0 if no "=" follows it.  NOTE: the "=" search starts after
   the keyword, so an intervening keyword's "=" could be matched if the
   header is malformed. */
double ValueOf(const char *keyword, SMVinfo frame)
{
double value;
char *string;
int keylen = strlen(keyword);
/* start at the beginning */
string = frame.header;
/* find first instance of keyword in the header */
// string = (char *) strstr(frame.header, keyword);
// string = string + keylen;
/* find last instance of keyword in the header */
while ((char *) strstr(string, keyword) != NULL)
{
string = (char *) strstr(string, keyword)+keylen;
}
if(string == frame.header) return NAN;
/* advance to just after the "=" sign */
string = (char *) strstr(string, "=");
if(string == NULL) return 0.0;
++string;
value = atof(string);
return value;
}
/* Read an 8-bit binary PGM ("P5") image.
 * Returns a calloc'd width*height pixel buffer (caller frees) and stores the
 * dimensions in *returned_width / *returned_height; returns NULL (with
 * dimensions 0) if the file cannot be opened or is not a P5 file.
 * Fixes vs. previous version: the header is parsed with fscanf instead of an
 * unconditional 512-byte fread (which aborted on valid PGM files smaller
 * than 512 bytes) and strstr is no longer run on an unterminated buffer. */
unsigned char *read_pgm5_bytes(char *filename,unsigned int *returned_width,unsigned int *returned_height)
{
    unsigned char *array = NULL;
    FILE *handle = NULL;
    unsigned int width=0,height=0,maxvalue=0;
    char magic[3] = {0,0,0};

    handle = fopen(filename,"rb");
    if(handle)
    {
        /* PGM header: "P5<whitespace>width<whitespace>height<whitespace>maxvalue<single whitespace character>" */
        if(fscanf(handle,"%2s",magic)==1 && strcmp(magic,"P5")==0)
        {
            if(fscanf(handle," %u %u %u",&width,&height,&maxvalue) != 3)
            {
                perror("PGM fscanf");
                exit(9);
            }
            /* skip final single whitespace character
               (first pixel could itself have a whitespace value such as 0x20) */
            fseek(handle,1,SEEK_CUR);
            array = (unsigned char *) calloc(sizeof(unsigned char),width*height);
            if(array == NULL || ! fread(array,width,height,handle))
            {
                perror("PGM fread");
                exit(9);
            }
        }
        fclose(handle);
    }
    else
    {
        perror("PGM fopen");
    }
    *returned_width = width;
    *returned_height = height;
    return array;
}
|
GB_binop__rdiv_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64)
// A*D function (colscale): GB (_AxD__rdiv_fc64)
// D*A function (rowscale): GB (_DxB__rdiv_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64)
// C=scalar+B GB (_bind1st__rdiv_fc64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc64)
// C=A+scalar GB (_bind2nd__rdiv_fc64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_div (bij, aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_div (y, x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
/* C += A+B for rdiv_fc64 where C, A, and B are all dense; the inner loop
   comes entirely from the included template (auto-generated kernel). */
void GB (_Cdense_ewise3_accum__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B for rdiv_fc64 where all three matrices are dense; the inner loop
   comes entirely from the included template (auto-generated kernel). */
void GB (_Cdense_ewise3_noaccum__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate a sparse matrix into a dense matrix (subassign 23).
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_Cdense_accumB__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar into a dense matrix (subassign 22).
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_Cdense_accumb__rdiv_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
/* unreachable; kept by the code generator */
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: column scale by a diagonal matrix D, via the colscale template.
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_AxD__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: row scale by a diagonal matrix D, via the rowscale template.
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_DxB__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B for rdiv_fc64.  When
   is_eWiseUnion is true, alpha/beta scalars substitute for entries present
   in only one input.  Compiled out (GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_AaddB__rdiv_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
/* unwrap the typed alpha/beta scalars for the union variant */
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult (method 08): C=A.*B with optional mask where C is sparse or
   hypersparse; the work is done by the included meta-template.
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_AemultB_08__rdiv_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
   bitmap/full.  GB_BINOP_FLIP selects at compile time whether a flipped
   operator variant is needed; for rdiv it is 0, so flipxy is ignored here.
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_AemultB_02__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A and B
   bitmap/full; the work is done by the included template.
   Compiled out (returns GrB_NO_VALUE) when GB_DISABLE is set. */
GrB_Info GB (_AemultB_04__rdiv_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Wrapper for the bitmap eWiseMult with the double-complex RDIV operator;
// ewise_method selects the sub-method inside GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__rdiv_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For RDIV, cij = rdiv (x, bij) = bij / x for each stored entry of B.
GrB_Info GB (_bind1st__rdiv_fc64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC64_t *restrict Cz = (GxB_FC64_t *) Cx_output ;
    const GxB_FC64_t *restrict Bz = (const GxB_FC64_t *) Bx_input ;
    const GxB_FC64_t xscalar = (*((GxB_FC64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries absent from the bitmap of B
        if (!GBB (Bb, k)) continue ;
        Cz [k] = GB_FC64_div (GBX (Bz, k, false), xscalar) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// For RDIV, cij = rdiv (aij, y) = y / aij for each stored entry of A.
GrB_Info GB (_bind2nd__rdiv_fc64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC64_t *restrict Cz = (GxB_FC64_t *) Cx_output ;
    const GxB_FC64_t *restrict Az = (const GxB_FC64_t *) Ax_input ;
    const GxB_FC64_t yscalar = (*((GxB_FC64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries absent from the bitmap of A
        if (!GBB (Ab, k)) continue ;
        Cz [k] = GB_FC64_div (yscalar, GBX (Az, k, false)) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// For RDIV, op (x, aij) = rdiv (x, aij) = aij / x.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GxB_FC64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_FC64_div (aij, x) ; \
}

// Transpose A while applying bind1st with scalar x; the kernel is in
// GB_unop_transpose.c and uses the GB_CAST_OP macro defined above.
GrB_Info GB (_bind1st_tran__rdiv_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for subsequent generated code (preprocessor only;
    // never executed at runtime)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// For RDIV, op (aij, y) = rdiv (aij, y) = y / aij.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GxB_FC64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_FC64_div (y, aij) ; \
}

// Transpose A while applying bind2nd with scalar y; the kernel is in
// GB_unop_transpose.c and uses the GB_CAST_OP macro defined above.
GrB_Info GB (_bind2nd_tran__rdiv_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
ErdosRenyiEnumerator.h | /*
 * ErdosRenyiEnumerator.h
*
* Created on: 07.08.2018
* Author: Manuel Penschuck (networkit@manuel.jetzt)
*/
#ifndef ERDOSRENYIENUMERATOR_H_
#define ERDOSRENYIENUMERATOR_H_
#include <omp.h>
#include <atomic>
#include <cassert>
#include <cmath>
#include <random>
#include "../Globals.h"
#include "../auxiliary/Random.h"
#include "../auxiliary/SignalHandling.h"
namespace NetworKit {
/**
* Generates a stream of edges of a G(n,p) graph. The edges are not
* written to memory, but emitted only via usual @a forEdges semantics
* to a callback.
*
* Use @ref ErdosRenyiGenerator as a wrapper to output a graph.
*
* The enumerator implements both floating point and fixed point arithmetic,
* to compute the edges which can be selected via the template parameter
* @a UseFixedPoint. It defaults to true, as this algorithm is typically
* 2 to 3 times faster. In theory, floating point arithmetic allows for
* longer runs of consecutive edges not selected. As those events virtually
* never occur, there are no measurable implications of using the faster
* variant.
*/
template <bool UseFixedPoint = true>
class ErdosRenyiEnumerator {
    //! this type is used only internally for fixed-point arithmetic
    using integral_t = unsigned long long;

public:
    /**
     * Generates a G(n, p) graph for n > 1 and 0 < p < 1.
     *
     * For a @b directed graph, the resulting edge stream is equivalent
     * to throwing a coin for each node pair (u, v) and accepting it
     * with probability @a prob; i.e. the stream may include self-loops.
     * Hence the expected number of edges is n*n*prob.
     *
     * For an @b undirected graph, all node pairs (u, v) with
     * 0 <= v < u < n are considered. Hence the expected number of edges
     * is n*(n-1)/2*prob.
     *
     * @param n Number of nodes to generate
     * @param prob Probability that an edge exists
     * @param directed Selects a directed graph
     */
    ErdosRenyiEnumerator(node n, double prob, bool directed) :
        n{n},
        prob{prob},
        directed{directed}
    {
        assert(n > 0);
    }

    /**
     * Generates an Erdos-Renyi graph as specified in the constructor on the
     * fly. The stream is generated in parallel on as many cores as the OpenMP
     * parallel section is allotted. For each edge the callback @a handle is
     * invoked. Two signatures are supported for the callback:
     *
     *   (unsigned tid, node u, node v)
     *   (node u, node v)
     *
     * where tid is the current thread id as returned by omp_get_thread_num()
     * and u and v are the edge's nodes. In case of an undirected graph u > v.
     *
     * It is guaranteed that no two threads emit edges for the same u.
     *
     * It can be expected that all threads emit a similar number of edges.
     *
     * @return Number of edges produced.
     */
    template<typename Handle>
    count forEdgesParallel(Handle handle) {
        std::atomic<count> numEdges {0};
        if (directed) {
            #pragma omp parallel
            {
                const unsigned threads = omp_get_num_threads();
                const unsigned tid = omp_get_thread_num();
                // split the rows [0, n) evenly; each thread owns one
                // contiguous chunk, so no two threads share a u
                const node chunk_size = (n + threads - 1) / threads;
                const node first_node = std::min<node>(n, tid * chunk_size);
                const node last_node = std::min<node>(n, (tid+1) * chunk_size);
                const auto localNumEdges = enumerate<true>(handle, tid, prob, first_node, last_node);
                numEdges.fetch_add(localNumEdges, std::memory_order_relaxed);
            }
        } else {
            #pragma omp parallel
            {
                const unsigned threads = omp_get_num_threads();
                const unsigned tid = omp_get_thread_num();
                // cells in adj matrix per thread
                const node chunk_size = (n * (n-1) / 2 + threads - 1) / threads;
                // Derive this thread's row range [first_node, last_node) so
                // that each thread owns roughly chunk_size cells of the lower
                // triangle; upper_node solves the quadratic bound on the row
                // index for the cumulative cell count.
                node first_node;
                node last_node = 0;
                for(unsigned i = 0; i <= tid; i++) {
                    first_node = last_node;
                    node upper_node = std::ceil(std::sqrt(
                        0.25 + first_node * first_node + first_node + 2*chunk_size));
                    last_node = std::min<node>(n, upper_node);
                }
                // the last thread always covers any rounding remainder
                if (tid + 1 == threads) last_node = n;
                if (first_node < last_node) {
                    const auto localNumEdges = enumerate<false>(handle, tid, prob, first_node, last_node);
                    numEdges.fetch_add(localNumEdges, std::memory_order_relaxed);
                }
            }
        }
        return numEdges.load();
    }

    /**
     * Similarly to @ref forEdgesParallel but computed on one thread only.
     * If the callback accepts three arguments tid is always 0.
     */
    template<typename Handle>
    count forEdges(Handle handle) {
        if (directed) {
            return enumerate<true>(handle, 0, prob, 0, n);
        } else {
            return enumerate<false>(handle, 0, prob, 0, n);
        }
    }

    /**
     * Returns the expected number of edges to be generated.
     */
    count expectedNumberOfEdges() const {
        return prob * n * (directed ? n : (n-1) * 0.5);
    }

private:
    const node n;         //!< number of nodes
    const double prob;    //!< probability p
    const bool directed;  //!< true if a directed graph should be generated

    // In the undirected case we only traverse the lower triangle (excluding
    // the diagonal) of the adjacency matrix.
    //
    // Dispatcher: for very dense graphs (prob > 0.9) invert the generator
    // and sample the NON-edges instead, emitting their complement. This does
    // not change the asymptotic work but reduces the number of random bits
    // drawn from the prng. (NOTE(review): the original comment said p > 0.5,
    // but the code's threshold is 0.9.)
    template <bool Directed, typename Handle>
    count enumerate(Handle handle, unsigned tid, double prob, const node node_begin, const node node_end) const {
        if (prob > 0.9) {
            node cur_u = node_begin;
            node cur_v = 0;
            count num_edges = 0;
            if (!Directed && cur_u == 0)
                cur_u = 1; // all edges need to be of form u > v!
            // Emits every pair strictly between the previously reported
            // non-edge and the non-edge (u, v) just produced by the inverted
            // generator, advancing (cur_u, cur_v) in row-major order.
            auto complement_graph = [&] (unsigned tid, node u, node v) {
                while(!(cur_u == u && cur_v == v)) {
                    callHandle(handle, tid, cur_u, cur_v);
                    num_edges++;
                    if (++cur_v == (Directed ? n : cur_u)) {
                        cur_v = 0;
                        cur_u++;
                    }
                }
            };
            enumerate_<Directed>(
                complement_graph, tid, 1.0 - prob, node_begin, node_end);
            // flush the tail: emit all remaining edges up to the range end
            complement_graph(tid, node_end, 0);
            return num_edges;
        }
        return enumerate_<Directed>(handle, tid, prob, node_begin, node_end);
    }

    // Core skip-based sampler (Batagelj/Brandes style): instead of testing
    // every candidate pair, draw geometrically distributed skip distances
    // between selected pairs, walking the (row-major) candidate sequence.
    template <bool Directed, typename Handle>
    count enumerate_(Handle handle, unsigned tid, double prob, const node node_begin, const node node_end) const {
        Aux::SignalHandler handler;
        if (prob < std::pow(n, -3.0)) {
            // Even with a union bound over all candidates, WHP no edge will be emitted
            return 0;
        }
        const double inv_log2_cp = 1.0 / std::log2(1.0 - prob);
        // random source
        auto& prng = Aux::Random::getURNG(); // this is thread local
        auto distr = get_distribution<UseFixedPoint>();
        count curr = node_begin;
        if (!Directed && !curr) curr = 1; // undirected: row 0 has no cells (u > v)
        node next = -1; // unsigned wrap-around intended: first "next += skip" yields skip-1
        node max_skip = 0;
        count numEdges = 0;
        while (curr < node_end) {
            handler.assureRunning();
            // compute new step length
            auto skip = skip_distance(distr(prng), inv_log2_cp);
            next += skip;
            if (skip > max_skip) max_skip = skip;
            // check if at end of row (assuming an average degree of 1,
            // it's typically faster to repeatedly subtract than to
            // divide; it is in any case easier ...)
            while (next >= (Directed ? n : curr)) {
                // adapt to next row
                next = next - (Directed ? n : curr);
                curr++;
            }
            // insert edge
            if (curr < node_end) {
                numEdges++;
                callHandle(handle, tid, curr, next);
            }
        }
        return numEdges;
    }

    // Optimized version of the computation of the skip distance as
    // proposed by Batagelj and Brandes. It basically converts a uniform
    // variate to a geometric random variable.
    count skip_distance(integral_t random_prob, double inv_log2_cp) const {
        /*
         * The original idea is to compute
         * 1 + floor(log(1 - x) / log(1 - prob)) where x is
         * a uniform real from (0, 1).
         *
         * We now precompute inv_log_cp := 1.0 / log(1 - prob) once
         * to avoid recomputing the second log and to replace a
         * division by a multiplication. Hence we compute
         * 1 + floor(log(1 - x) * inv_log_cp).
         *
         * Observe that P[x = k] = P[x = 1-k] and hence we avoid the subtraction:
         * 1 + floor(log(x) * inv_log_cp).
         *
         * Then, typically log() is slower than log2(). On my
         * machines its a factor of roughly 1.5. Thus we replace
         * log by log2 in both equations:
         * inv_log2_cp := 1.0 / log2(1 - prob)
         * 1 + floor(log2(x) * inv_log2_cp)
         *
         * Now we optimise the generation of the random number.
         * uniform_real_distribution is roughly 3 times slower than
         * uniform_int_distribution. Hence let's switch to fix-point arithmetic.
         * Let X a real drawn uniformly from (0, 2**64), i.e. we model
         * X = (2**64) * x:
         *
         * 1 + floor(log2(x) * inv_log2_cp)
         * = 1 + floor(log2(X/2**64) * inv_log2_cp)
         * = 1 + floor((log2(X) - 64) * inv_log2_cp).
         */
        return 1 + static_cast<count>(
            floor((log2(random_prob) - 8*sizeof(integral_t)) * inv_log2_cp)
        );
    }

    // Floating-point variant of the skip distance (used when UseFixedPoint
    // is false); random_prob is a uniform real from (0, 1).
    count skip_distance(double random_prob, double inv_log2_cp) const {
        return 1 + static_cast<count>(floor((log2(random_prob)) * inv_log2_cp));
    }

    // SFINAE to determine and construct the right uniform distribution
    template <bool FixedPoint>
    auto get_distribution() const -> typename std::enable_if<FixedPoint, std::uniform_int_distribution<integral_t>>::type {
        return std::uniform_int_distribution<integral_t>{1, std::numeric_limits<integral_t>::max()};
    }

    template <bool FixedPoint>
    auto get_distribution() const -> typename std::enable_if<!FixedPoint, std::uniform_real_distribution<double>>::type {
        return std::uniform_real_distribution<double>{std::nextafter(0.0, 1.0), std::nextafter(1.0, 0.0)};
    }

    // SFINAE to allow using functors with and without thread id as parameter
    template <typename Handle>
    auto callHandle(Handle h, unsigned tid, node u, node v) const -> decltype(h(0u, node{0}, node{0})) {
        return h(tid, u, v);
    }

    template <typename Handle>
    auto callHandle(Handle h, unsigned /*tid*/, node u, node v) const -> decltype(h(node{0}, node{0})) {
        return h(u, v);
    }
};
using ErdosRenyiEnumeratorDefault = ErdosRenyiEnumerator<true>;
}
#endif // ERDOSRENYIENUMERATOR_H_ |
grad.c | #include <mpi.h>
extern int *cn_c;
extern int *ce_c;
extern int *ec_c;
extern int *cn_crem;
extern int *ce_crem;
extern int *ec_crem;
extern int *neighbor_map;
extern int *cedge_map;
extern int *ecell_map;
extern int *neighbor_maprem;
extern int *cedge_maprem;
extern int *ecell_maprem;
extern GVAL **neighbor_2Dbuf;
extern GVAL **neighbor_3Dbuf;
extern GVAL **cedge_2Dbuf;
extern GVAL **cedge_3Dbuf;
extern GVAL **ecell_2Dbuf;
extern GVAL **ecell_3Dbuf;
extern GVAL **neighbor_2Dbufrem;
extern GVAL **neighbor_3Dbufrem;
extern GVAL **cedge_2Dbufrem;
extern GVAL **cedge_3Dbufrem;
extern GVAL **ecell_2Dbufrem;
extern GVAL **ecell_3Dbufrem;
extern MPI_Request *mpi_send_requests;
extern MPI_Request *mpi_recv_requests;
extern int comm_tag;
#include "grid.h"
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_temp;
extern struct {
char *name;
int loc;
int dim;
union {
GVAL *restrict * restrict p2;
GVAL *restrict * restrict * restrict p3;
} data_pointer;
} *gv_grad;
/*
 * grad: machine-generated kernel that (1) exchanges remote edge-cell values
 * of gv_temp between MPI ranks and (2) computes, for every local edge, the
 * difference of the two adjacent cell values of gv_temp into gv_grad.
 *
 * NOTE(review): the MPI transfers use MPI_FLOAT, which assumes GVAL is
 * float — confirm against the GVAL typedef in grid.h.
 */
void grad(GRID * g)
{
    {
        {
            /* Halo exchange: pack the remotely-needed cell values, post
               nonblocking send/recv per peer rank, then unpack. */
            comm_tag++;
            for (int pn = 0; pn < g->mpi_world_size; pn++) {
                if (pn != g->mpi_rank) {
                    /* pack: ecell_maprem holds (block, index) pairs, 2 ints per entry */
                    for (int i = 0; i < (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)); i++) {
                        for (int k = 0; k < g->height; k++)
                            ecell_3Dbufrem[pn][g->height * i + k] = gv_temp->data_pointer.p3[ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0) + 2 * i]][k][ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0) + 2 * i + 1]];
                    }
                    MPI_Isend(ecell_3Dbufrem[pn], (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * g->height, MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_send_requests[pn]);
                    MPI_Irecv(ecell_3Dbuf[pn], (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * g->height, MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
                }
            }
            /* NOTE(review): waits on 2*world_size requests starting at
               mpi_send_requests — this assumes the send and recv request
               arrays are contiguous and that unused slots (pn == rank) are
               initialized to MPI_REQUEST_NULL; verify at allocation site. */
            MPI_Waitall(g->mpi_world_size * 2, mpi_send_requests, MPI_STATUSES_IGNORE);
            for (int pn = 0; pn < g->mpi_world_size; pn++) {
                if (pn != g->mpi_rank) {
                    /* unpack: ecell_map holds 5 ints per entry; fields 3 and 4
                       are the destination (block, index) */
                    for (int i = 0; i < (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)); i++) {
                        for (int k = 0; k < g->height; k++)
                            gv_temp->data_pointer.p3[ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 3]][k][ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 4]] = ecell_3Dbuf[pn][g->height * i + k];
                    }
                }
            }
        }
        /* This rank's slice of edge blocks, from a block-cyclic-style
           partition of g->eBlkCnt over the world size. */
        size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
        size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
        /* grad(e) = temp(cell0(e)) - temp(cell1(e)) for every edge e in the
           local blocks, all height levels. */
        #pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            for (size_t height_index = (0); height_index < (g->height); height_index++) {
                for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
                    gv_grad->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = gv_temp->data_pointer.p3[(g->eCellBlk[(0)]->data_pointer.p2[(block_index)][(edge_index)])][(height_index)][(g->eCellIdx[(0)]->data_pointer.p2[(block_index)][(edge_index)])] - gv_temp->data_pointer.p3[(g->eCellBlk[(1)]->data_pointer.p2[(block_index)][(edge_index)])][(height_index)][(g->eCellIdx[(1)]->data_pointer.p2[(block_index)][(edge_index)])];
                }
            }
        }
    }
}
|
GB_binop__second_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__second_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__second_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__second_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint16)
// A*D function (colscale): GB (_AxD__second_uint16)
// D*A function (rowscale): GB (_DxB__second_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__second_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__second_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint16_t
// A type: uint16_t
// A pattern? 1
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_UINT16 || GxB_NO_SECOND_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// With the SECOND operator (GB_BINOP: z = y), C = A+B reduces to copying B
// entrywise; the loop lives in GB_dense_ewise3_noaccum_template.c.
void GB (_Cdense_ewise3_noaccum__second_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// With the SECOND operator, cij = second (cij, bij) = bij: entries present
// in B overwrite the corresponding entries of dense C (subassign 23 kernel).
GrB_Info GB (_Cdense_accumB__second_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// With the SECOND operator, cij = second (cij, b) = b: the scalar overwrites
// every entry of dense C (subassign 22 kernel).
GrB_Info GB (_Cdense_accumb__second_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // fix: removed an unreachable duplicate "return (GrB_SUCCESS) ;" that
    // followed the braced block above, which already returns on all paths
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Column-scale kernel for the SECOND operator; the loop lives in
// GB_AxB_colscale_template.c and writes through Cx.
GrB_Info GB (_AxD__second_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale kernel for the SECOND operator; the loop lives in
// GB_AxB_rowscale_template.c and writes through Cx.
GrB_Info GB (_DxB__second_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// eWiseAdd wrapper for the SECOND(uint16) operator; the kernel is in
// GB_add_template.c. When is_eWiseUnion, alpha/beta are the fill values used
// for entries present in only one of A or B.
GrB_Info GB (_AaddB__second_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// eWiseMult "method 08" wrapper for the SECOND(uint16) operator; the kernel
// is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__second_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// eWiseMult "method 02" wrapper for the SECOND(uint16) operator; the kernel
// is in GB_emult_02_template.c and this wrapper only fixes GB_FLIPPED.
GrB_Info GB (_AemultB_02__second_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// eWiseMult "method 04" wrapper for the SECOND(uint16) operator; the kernel
// is in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__second_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap eWiseMult wrapper for the SECOND(uint16) operator; ewise_method
// selects the sub-method inside GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__second_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Compute the requested distortion metric first; no difference image is
returned if the metric itself cannot be computed.
*/
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
/*
The difference canvas must cover the larger of the two images.
*/
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
/*
Build the difference image: a clone of the source (read mask removed),
extended to the canvas size and made fully opaque.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
/*
The highlight image is painted below and later composited over the
difference image.
*/
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
/*
Default highlight/lowlight/masklight colors; each can be overridden via a
"compare:*-color" image artifact.
*/
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p,
*magick_restrict q;
register Quantum
*magick_restrict r;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
register ssize_t
i;
/*
Pixels masked out in either image are painted with the masklight color.
*/
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
/*
A pixel differs when any updated channel pair is farther apart than the
fuzz threshold; color channels are compared alpha-premultiplied.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
if ((distance*distance) > fuzz)
{
difference=MagickTrue;
break;
}
}
/*
Paint: lowlight where the pixel matches, highlight where it differs.
*/
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Merge the highlight overlay onto the difference image with the source
image's compose method.
*/
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images: a count,
    per channel, of positions whose accumulated squared channel distance
    exceeds the fuzz threshold.
  */
  status=MagickTrue;
  fuzz=MagickMin(GetPixelChannels(image),GetPixelChannels(reconstruct_image))*
    GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        Sa;

      MagickBooleanType
        difference;

      register ssize_t
        i;

      /*
        Fix: skip pixels masked out in either image, using the read mask as
        every other distortion metric in this module does.  The previous
        write-mask test on the source image alone was inconsistent with the
        sibling metrics (a comparison only reads pixels).
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      distance=0.0;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /*
          Alpha is compared directly; color channels are compared
          alpha-premultiplied.
        */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance+=pixel*pixel;
        if (distance > fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Fuzz distortion: mean of the squared, normalized channel differences
    (color channels compared alpha-premultiplied); the composite channel is
    reduced to a root-mean-square over the active channels.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    /*
      Fix: the null test on q previously cast to (Quantum *), silently
      dropping the const qualifier; use (const Quantum *) as every other
      metric in this module does.
    */
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /*
        Pixels masked out in either image do not contribute.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    pixel_count;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Mean absolute error: the average of |source-reconstruct| per channel,
    with color channels compared alpha-premultiplied; the composite channel
    is averaged over the participating channels.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  pixel_count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:pixel_count)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      row_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(row_distortion,0,sizeof(row_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        reconstruct_alpha,
        source_alpha;

      register ssize_t
        i;

      /*
        Pixels masked out in either image do not contribute.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      source_alpha=QuantumScale*GetPixelAlpha(image,p);
      reconstruct_alpha=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          delta;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          delta=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          delta=QuantumScale*fabs(source_alpha*p[i]-reconstruct_alpha*
            GetPixelChannel(reconstruct_image,channel,q));
        row_distortion[i]+=delta;
        row_distortion[CompositePixelChannel]+=delta;
      }
      pixel_count++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=row_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  pixel_count=PerceptibleReciprocal(pixel_count);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=pixel_count;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Mean error per pixel: accumulates per-channel absolute differences into
    distortion[] and records mean/normalized/maximum error statistics on the
    source image.  Note: area counts compared channel samples (it is
    incremented once per channel), not pixels.
  */
  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /*
        Pixels masked out in either image do not contribute.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          distance=fabs(Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Fix: guard against a zero sample count (fully masked input or an early
    read failure) which previously divided by zero and stored NaN/Inf into
    image->error; PerceptibleReciprocal() matches the zero-area handling of
    the other metrics in this module.
  */
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;

  MagickBooleanType
    status;

  register ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Mean squared error: the average squared, normalized channel difference
    (color channels compared alpha-premultiplied); the composite channel is
    averaged over the participating channels.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      register ssize_t
        i;

      /*
        Pixels masked out in either image do not contribute.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;
  /*
    Fix: explicit (double) cast, matching the identical division in the
    fuzz and mean-absolute metrics; GetImageChannels() returns size_t.
  */
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
const Image *image,const Image *reconstruct_image,double *distortion,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*image_view,
*reconstruct_view;
ChannelStatistics
*image_statistics,
*reconstruct_statistics;
double
area;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
columns,
rows;
ssize_t
y;
/*
Normalize to account for variation due to lighting and exposure condition.
*/
image_statistics=GetImageStatistics(image,exception);
reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
if ((image_statistics == (ChannelStatistics *) NULL) ||
(reconstruct_statistics == (ChannelStatistics *) NULL))
{
if (image_statistics != (ChannelStatistics *) NULL)
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
if (reconstruct_statistics != (ChannelStatistics *) NULL)
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
/*
First pass: count the pixels that are unmasked in both images; area becomes
the reciprocal of that count.
*/
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
area=PerceptibleReciprocal(area);
/*
Second pass: accumulate the mean-centered cross products of the two images
per channel (color channels alpha-premultiplied).
*/
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
{
distortion[i]+=area*QuantumScale*(p[i]-
image_statistics[channel].mean)*(GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
else
{
distortion[i]+=area*QuantumScale*(Sa*p[i]-
image_statistics[channel].mean)*(Da*GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Divide by the standard deviation.
*/
distortion[CompositePixelChannel]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma;
PixelChannel channel = GetPixelChannelChannel(image,i);
gamma=image_statistics[channel].standard_deviation*
reconstruct_statistics[channel].standard_deviation;
gamma=PerceptibleReciprocal(gamma);
distortion[i]=QuantumRange*gamma*distortion[i];
distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
}
/*
Composite channel reports the RMS of the per-channel correlations.
*/
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
GetImageChannels(image));
/*
Free resources.
*/
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
return(status);
}
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Peak absolute error: the largest normalized channel difference observed
    anywhere in the image, tracked per channel and overall.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      peak[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(peak,0,sizeof(peak));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        reconstruct_alpha,
        source_alpha;

      register ssize_t
        i;

      /*
        Pixels masked out in either image do not contribute.
      */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      source_alpha=QuantumScale*GetPixelAlpha(image,p);
      reconstruct_alpha=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          delta;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          delta=QuantumScale*fabs((double) p[i]-
            GetPixelChannel(reconstruct_image,channel,q));
        else
          delta=QuantumScale*fabs(source_alpha*p[i]-reconstruct_alpha*
            GetPixelChannel(reconstruct_image,channel,q));
        if (delta > peak[i])
          peak[i]=delta;
        if (delta > peak[CompositePixelChannel])
          peak[CompositePixelChannel]=delta;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (peak[j] > distortion[j])
        distortion[j]=peak[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Convert each channel's mean-squared error into PSNR (decibels); a
    vanishing error maps to an infinite signal-to-noise ratio.
  */
  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i]));
  }
  return(status);
}
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
ChannelPerceptualHash
*channel_phash,
*reconstruct_phash;
const char
*artifact;
MagickBooleanType
normalize;
ssize_t
channel;
/*
Compute perceptual hash in the sRGB colorspace.
*/
channel_phash=GetImagePerceptualHash(image,exception);
if (channel_phash == (ChannelPerceptualHash *) NULL)
return(MagickFalse);
reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
{
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
channel_phash);
return(MagickFalse);
}
/*
The "phash:normalize" artifact must be present and true to enable the
normalized branch below; it defaults to off.
*/
artifact=GetImageArtifact(image,"phash:normalize");
normalize=(artifact == (const char *) NULL) ||
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4)
#endif
for (channel=0; channel < MaxPixelChannels; channel++)
{
double
difference;
register ssize_t
i;
/*
Accumulate the difference of the image moments over every colorspace the
hash was computed in.
*/
difference=0.0;
for (i=0; i < MaximumNumberOfImageMoments; i++)
{
double
alpha,
beta;
register ssize_t
j;
for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
{
alpha=channel_phash[channel].phash[j][i];
beta=reconstruct_phash[channel].phash[j][i];
/*
NOTE(review): in the normalize branch the assignment OVERWRITES (rather
than accumulates) difference on every iteration, so only the last
moment/colorspace pair survives -- confirm this is intended.
*/
if (normalize == MagickFalse)
difference+=(beta-alpha)*(beta-alpha);
else
difference=sqrt((beta-alpha)*(beta-alpha)/
channel_phash[0].number_channels);
}
}
distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
distortion[CompositePixelChannel]+=difference;
}
/*
Free resources.
*/
reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
reconstruct_phash);
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    RMSE: take the square root of each channel's mean-squared error.
  */
  MagickBooleanType
    status;

  register ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the mean SSIM index of the two
  images over a Gaussian window (radius/sigma tunable via the
  "compare:ssim-radius" and "compare:ssim-sigma" artifacts; stabilizing
  constants c1/c2 via "compare:ssim-k1"/"compare:ssim-k2").  Per-channel sums
  are accumulated into distortion[] and normalized by the pixel count at the
  end.  Returns MagickFalse if any cache-view read fails.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  /* Build the Gaussian weighting kernel used for the local statistics. */
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Stabilizing constants: c1=(k1*L)^2, c2=(k2*L)^2 with dynamic range L. */
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a window tall/wide enough to center the kernel on row y;
       virtual views handle the out-of-bounds border pixels. */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      register const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      register double
        *k;

      ssize_t
        v;

      (void) ResetMagickMemory(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) ResetMagickMemory(x_pixel_sigma_squared,0,
        sizeof(x_pixel_sigma_squared));
      (void) ResetMagickMemory(xy_sigma,0,sizeof(xy_sigma));
      /* NOTE(review): this reset targets x_pixel_sigma_squared a second time
         (the first argument was likely meant to be y_pixel_sigma_squared).
         Harmless today because y_pixel_sigma_squared is reset just below,
         but the copy-paste slip deserves cleanup. */
      (void) ResetMagickMemory(x_pixel_sigma_squared,0,
        sizeof(y_pixel_sigma_squared));
      (void) ResetMagickMemory(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) ResetMagickMemory(y_pixel_sigma_squared,0,
        sizeof(y_pixel_sigma_squared));
      /* Accumulate Gaussian-weighted means, variances, and covariance over
         the kernel window centered at (x,y). */
      k=kernel_info->values;
      reference=p;
      target=q;
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        register ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        /* Step to the next kernel row within the fetched window. */
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      /* Combine the window statistics into the SSIM formula per channel. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* Merge this row's totals into the shared accumulator. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  /* Normalize sums to mean SSIM per channel; the composite channel is also
     averaged over the number of contributing channels. */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=((double) columns*rows);
  }
  distortion[CompositePixelChannel]/=((double) columns*rows);
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    DSSIM = (1 - SSIM)/2: map the similarity index onto a distance where
    0 means identical.
  */
  MagickBooleanType
    status;

  register ssize_t
    channel;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=(1.0-distortion[channel])/2.0;
  return(status);
}
/*
  GetImageDistortion() dispatches to the metric-specific comparison routine,
  then reports the composite-channel value through *distortion and records it
  as the "distortion" image property.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.  One slot per pixel channel plus the composite.
  */
  length=MaxPixelChannels+1;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  /* Note: the default label sits mid-switch (valid C); unknown metrics fall
     back to normalized cross-correlation.  Every case assigns status. */
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  /* Only the composite value is surfaced to the caller. */
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  /*
    Compute the per-channel distortion vector for the requested metric.
    The caller owns the returned buffer (MaxPixelChannels+1 doubles) and
    must relinquish it; NULL is returned on failure.

    Bug fix: the PerceptualHashErrorMetric case previously called
    GetRootMeanSquaredDistortion (a copy-paste of the following case); it
    now calls GetPerceptualHashDistortion, matching GetImageDistortion.
  */
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compare the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  /*
    Early-exit pixel comparison: the nested loops break out of every level as
    soon as one channel differs by at least MagickEpsilon, so y reaches rows
    only when all compared pixels matched.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Masked-out pixels are skipped entirely. */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          break;
      }
      /* A premature channel-loop exit means a mismatch: cascade outward. */
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  /*
    Accumulate per-channel error statistics between the two images and store
    them in image->error (mean error per pixel, normalized mean error, and
    normalized maximum error).  Returns MagickTrue when every compared
    channel matched exactly.

    Robustness fix: guard against area == 0 (degenerate/fully-masked
    comparison), which previously produced 0/0 == NaN in the stored
    statistics; the errors are reported as zero in that case.
  */
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Masked-out pixels contribute nothing to the statistics. */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  if (area == 0.0)
    area=1.0;  /* nothing compared: avoid 0/0 NaN; sums are all zero */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImageImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  /*
    Crop the candidate window at (x_offset,y_offset) out of the target image
    and measure its distortion against the reference tile.  Returns 0.0 if
    the crop or the distortion computation fails.
  */
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    crop_geometry;

  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,exception);
  crop_image=DestroyImage(crop_image);
  return(status == MagickFalse ? 0.0 : distortion);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /* The similarity map has one pixel per valid placement of the reference
     tile inside the image. */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Once any thread finds a match at or below the threshold, the
       remaining rows bail out early. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      /* NOTE(review): the critical section above guards only this first if
         statement; the best-match update of offset/*similarity_metric below
         executes outside it and so can race between threads — worth
         confirming/tightening upstream. */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelWriteMask(similarity_image,q) <= (QuantumRange/2))
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      /* Brighter map pixels mean better matches (QuantumRange at
         similarity 0). */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
versaoC.c | // Fernanda Lyra Alves
// Ivan Dos Santos Muniz
// Programação Concorrente e Distribuída - 2020.2
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
static const int cel_morta = 0;
static const int cel_viva = 1;
static const unsigned int num_geracoes = 2000;
static const unsigned int srand_value = 1985;
static const unsigned int n_threads = 8;
int tabuleiro_n = 2048;
int coord_lim(int coord);
void copia_tabuleiro(int **origem, int **destino);
int vivos(int **tab);
int vizinhos(int **tab, int l, int c);
int decide_vida(int **tab, int **tab_novo, int l, int c);
int main()
{
  // Conway's Game of Life on a toroidal tabuleiro_n x tabuleiro_n board,
  // parallelized with OpenMP.  Prints the live-cell count after each
  // generation and two wall-clock timings at the end.
  struct timeval inicio_exe;
  gettimeofday(&inicio_exe, NULL);
  // Allocate and zero-initialize both boards (current and next generation)
  // as arrays of row pointers.
  // NOTE(review): calloc results are not checked; a failed allocation would
  // crash on first use.
  int **tabuleiro = NULL;
  int **tabuleiro_novo = NULL;
  tabuleiro = calloc(tabuleiro_n, sizeof(int *));
  for (int col = 0; col < tabuleiro_n; col++)
    tabuleiro[col] = calloc(tabuleiro_n, sizeof(int));
  tabuleiro_novo = calloc(tabuleiro_n, sizeof(int *));
  for (int col = 0; col < tabuleiro_n; col++)
    tabuleiro_novo[col] = calloc(tabuleiro_n, sizeof(int));
  // Seed deterministically and fill the initial board with random 0/1 cells.
  srand(srand_value);
  for (int i = 0; i < tabuleiro_n; i++)
    for (int j = 0; j < tabuleiro_n; j++)
      tabuleiro[i][j] = rand() % 2;
  printf("Condição inicial: %d\n", vivos(tabuleiro));
  struct timeval inicio_ger;
  gettimeofday(&inicio_ger, NULL);
  // Run the generations: each cell of the next board is decided in
  // parallel from the current board, then copied back.
  for (unsigned int g = 0; g < num_geracoes; g++)
  {
    int l = 0;
    int c = 0;
#pragma omp parallel private(l, c) shared(tabuleiro, tabuleiro_novo) num_threads(n_threads)
    {
#pragma omp for
      for (l = 0; l < tabuleiro_n; l++)
        for (c = 0; c < tabuleiro_n; c++)
          decide_vida(tabuleiro, tabuleiro_novo, l, c);
    }
    copia_tabuleiro(tabuleiro_novo, tabuleiro);
    printf("Geração %u: %d\n", g + 1, vivos(tabuleiro));
  }
  // Release both boards (rows first, then the pointer arrays).
  for (int col = 0; col < tabuleiro_n; col++)
    free(tabuleiro[col]);
  free(tabuleiro);
  for (int col = 0; col < tabuleiro_n; col++)
    free(tabuleiro_novo[col]);
  free(tabuleiro_novo);
  struct timeval fim;
  gettimeofday(&fim, NULL);
  // Elapsed time as sec-delta plus usec-delta; a negative usec difference
  // is compensated by the seconds term, so the sum is still correct.
  printf("Tempo de execução total: %lf\nTempo de execução das gerações: %lf\n",
         (double)(fim.tv_usec - inicio_exe.tv_usec)/1000000 + (double)(fim.tv_sec - inicio_exe.tv_sec),
         (double)(fim.tv_usec - inicio_ger.tv_usec)/1000000 + (double)(fim.tv_sec - inicio_ger.tv_sec));
  return 0;
}
int coord_lim(int coord)
{
  /*
   * Wrap a (possibly negative) coordinate onto the torus [0, tabuleiro_n).
   * Generalized: the original "tabuleiro_n + coord" only handled offsets in
   * [-tabuleiro_n, 0); the double-modulo form is correct for any int.
   * Behavior is unchanged for the -1..tabuleiro_n range used by vizinhos().
   */
  return ((coord % tabuleiro_n) + tabuleiro_n) % tabuleiro_n;
}
void copia_tabuleiro(int **origem, int **destino)
{
  /*
   * Copy the whole board from origem into destino, one memcpy per row.
   * Rows are independent, so they are divided among the worker threads.
   */
#pragma omp parallel for num_threads(n_threads)
  for (int linha = 0; linha < tabuleiro_n; linha++)
    memcpy(destino[linha], origem[linha], sizeof(int) * tabuleiro_n);
}
int vivos(int **tab)
{
  /*
   * Count the live cells on the board.
   *
   * Fixes two concurrency defects in the original implementation:
   *   - the busy-wait "turnstile" on the shared non-atomic int thread_atual
   *     was a data race (undefined behavior) and could livelock;
   *   - the final "n_vivos += n_vivos_local" was executed by every thread
   *     with no synchronization, racing on n_vivos.
   * An OpenMP reduction expresses the same sum safely and without the
   * artificial serialization.
   */
  int n_vivos = 0;
#pragma omp parallel for reduction(+ : n_vivos) num_threads(n_threads)
  for (int l = 0; l < tabuleiro_n; l++)
    for (int c = 0; c < tabuleiro_n; c++)
      n_vivos += tab[l][c];
  return n_vivos;
}
int vizinhos(int **tab, int l, int c)
{
  /* Sum the eight toroidal neighbors of cell (l, c), skipping the cell
     itself.  Coordinates wrap through coord_lim(). */
  int soma = 0;
  for (int dl = -1; dl <= 1; dl++)
    for (int dc = -1; dc <= 1; dc++)
    {
      if (dl == 0 && dc == 0)
        continue;
      soma += tab[coord_lim(l + dl)][coord_lim(c + dc)];
    }
  return soma;
}
int decide_vida(int **tab, int **tab_novo, int l, int c)
{
  /*
   * Apply Conway's rules to cell (l, c) of tab, writing the result into
   * tab_novo: a live cell dies with fewer than 2 or 4+ neighbors; a dead
   * cell with exactly 3 neighbors comes alive; otherwise the state is kept.
   *
   * Fix: the function is declared int but previously fell off the end
   * without a return statement — undefined behavior if the result were
   * ever used.  It now returns the new cell state (callers that ignore the
   * value are unaffected).
   */
  int vizinhos_celula = vizinhos(tab, l, c);
  if (tab[l][c] == cel_viva && (vizinhos_celula < 2 || vizinhos_celula >= 4))
    tab_novo[l][c] = cel_morta;
  else if (tab[l][c] == cel_morta && vizinhos_celula == 3)
    tab_novo[l][c] = cel_viva;
  else
    tab_novo[l][c] = tab[l][c];
  return tab_novo[l][c];
}
|
requantize_leakyrelu_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Fused requantize + leaky-ReLU for a pack4 int32 blob on NEON.
// Dequantizes with scale_in, adds an optional bias, applies leaky ReLU and
// re-quantizes to int8 with scale_out. The two scales (and the bias) are
// folded into a single multiplier (and addend) up front so the inner loops
// need only one multiply(-accumulate) per vector — see the identity
// comments below.
static void requantize_leakyrelu_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;
    int outc = top_blob.c;
    int out_elempack = top_blob.elempack;

    // A size of 1 means a single scalar shared by all channels; otherwise
    // the tables hold per-channel values.
    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // int8(leakyrelu(v * scale_in, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out), slope)

    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)

    if (out_elempack == 8)
    {
        // pack4 -> pack8: two consecutive pack4 input channels are combined
        // into one pack8 output channel.
        if (bias_data_size == 0)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < outc; q++)
            {
                const int* intptr0 = bottom_blob.channel(q * 2);
                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
                signed char* ptr = top_blob.channel(q);

                // Per-lane scales for each pack4 half of this output channel.
                float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
                float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
                float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
                float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);

                // Fold the two scales into one multiplier (identity above).
                float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
                float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
                float32x4_t _slope = vdupq_n_f32(slope);

                int i = 0;
#if __aarch64__
                // Unrolled: 4 pixels (4x pack4 from each half) per iteration.
                for (; i + 3 < size; i += 4)
                {
                    float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
                    float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
                    float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
                    float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
                    float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
                    float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
                    _v00 = vmulq_f32(_v00, _scale0);
                    _v01 = vmulq_f32(_v01, _scale0);
                    _v02 = vmulq_f32(_v02, _scale0);
                    _v03 = vmulq_f32(_v03, _scale0);
                    _v10 = vmulq_f32(_v10, _scale1);
                    _v11 = vmulq_f32(_v11, _scale1);
                    _v12 = vmulq_f32(_v12, _scale1);
                    _v13 = vmulq_f32(_v13, _scale1);
                    // float2int8leakyrelu interleaves the two pack4 halves
                    // into one 8-lane int8 result.
                    vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
                    vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
                    vst1_s8(ptr + 16, float2int8leakyrelu(_v02, _v12, _slope));
                    vst1_s8(ptr + 24, float2int8leakyrelu(_v03, _v13, _slope));

                    intptr0 += 16;
                    intptr1 += 16;
                    ptr += 32;
                }
#endif // __aarch64__
                // Scalar tail: one pixel (pack4 + pack4) per iteration.
                for (; i < size; i++)
                {
                    float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    _v0 = vmulq_f32(_v0, _scale0);
                    _v1 = vmulq_f32(_v1, _scale1);
                    vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                    intptr0 += 4;
                    intptr1 += 4;
                    ptr += 8;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < outc; q++)
            {
                const int* intptr0 = bottom_blob.channel(q * 2);
                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
                signed char* ptr = top_blob.channel(q);

                float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
                float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
                float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
                float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
                float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
                float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);

                // Pre-scale the bias by scale_out as well (identity above),
                // so the loops are a single fused multiply-add.
                float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
                float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
                _bias0 = vmulq_f32(_bias0, _scale_out0);
                _bias1 = vmulq_f32(_bias1, _scale_out1);
                float32x4_t _slope = vdupq_n_f32(slope);

                int i = 0;
#if __aarch64__
                // Unrolled: 4 pixels per iteration (aarch64 has vfmaq_f32).
                for (; i + 3 < size; i += 4)
                {
                    float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
                    float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
                    float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
                    float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
                    float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
                    float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
                    _v00 = vfmaq_f32(_bias0, _v00, _scale0);
                    _v01 = vfmaq_f32(_bias0, _v01, _scale0);
                    _v02 = vfmaq_f32(_bias0, _v02, _scale0);
                    _v03 = vfmaq_f32(_bias0, _v03, _scale0);
                    _v10 = vfmaq_f32(_bias1, _v10, _scale1);
                    _v11 = vfmaq_f32(_bias1, _v11, _scale1);
                    _v12 = vfmaq_f32(_bias1, _v12, _scale1);
                    _v13 = vfmaq_f32(_bias1, _v13, _scale1);
                    vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
                    vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
                    vst1_s8(ptr + 16, float2int8leakyrelu(_v02, _v12, _slope));
                    vst1_s8(ptr + 24, float2int8leakyrelu(_v03, _v13, _slope));

                    intptr0 += 16;
                    intptr1 += 16;
                    ptr += 32;
                }
#endif // __aarch64__
                // 2 pixels per iteration; vmlaq_f32 on armv7 (no fused FMA).
                for (; i + 1 < size; i += 2)
                {
                    float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
                    float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
                    float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
#if __aarch64__
                    _v00 = vfmaq_f32(_bias0, _v00, _scale0);
                    _v01 = vfmaq_f32(_bias0, _v01, _scale0);
                    _v10 = vfmaq_f32(_bias1, _v10, _scale1);
                    _v11 = vfmaq_f32(_bias1, _v11, _scale1);
#else  // __aarch64__
                    _v00 = vmlaq_f32(_bias0, _v00, _scale0);
                    _v01 = vmlaq_f32(_bias0, _v01, _scale0);
                    _v10 = vmlaq_f32(_bias1, _v10, _scale1);
                    _v11 = vmlaq_f32(_bias1, _v11, _scale1);
#endif // __aarch64__
                    vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
                    vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));

                    intptr0 += 8;
                    intptr1 += 8;
                    ptr += 16;
                }
                // Scalar tail: one pixel per iteration.
                for (; i < size; i++)
                {
                    float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
                    float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
#if __aarch64__
                    _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                    _v1 = vfmaq_f32(_bias1, _v1, _scale1);
#else  // __aarch64__
                    _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                    _v1 = vmlaq_f32(_bias1, _v1, _scale1);
#endif // __aarch64__
                    vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                    intptr0 += 4;
                    intptr1 += 4;
                    ptr += 8;
                }
            }
        }
    }
    if (out_elempack == 1)
    {
        // pack4 -> pack1: each lane of an input pack4 channel is scattered
        // into one of 4 consecutive pack1 output channels.
        if (bias_data_size == 0)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const int* intptr = bottom_blob.channel(q);
                signed char* ptr0 = top_blob.channel(q * 4);
                signed char* ptr1 = top_blob.channel(q * 4 + 1);
                signed char* ptr2 = top_blob.channel(q * 4 + 2);
                signed char* ptr3 = top_blob.channel(q * 4 + 3);

                float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
                float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
                float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
                float32x4_t _slope = vdupq_n_f32(slope);

                int i = 0;
                for (; i < size; i++)
                {
                    float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
                    _v = vmulq_f32(_v, _scale);
                    // Only lanes 0..3 are used; _v is duplicated to satisfy
                    // the 2-vector signature of float2int8leakyrelu.
                    int8x8_t v = float2int8leakyrelu(_v, _v, _slope);
                    ptr0[0] = vget_lane_s8(v, 0);
                    ptr1[0] = vget_lane_s8(v, 1);
                    ptr2[0] = vget_lane_s8(v, 2);
                    ptr3[0] = vget_lane_s8(v, 3);

                    intptr += 4;
                    ptr0 += 1;
                    ptr1 += 1;
                    ptr2 += 1;
                    ptr3 += 1;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const int* intptr = bottom_blob.channel(q);
                signed char* ptr0 = top_blob.channel(q * 4);
                signed char* ptr1 = top_blob.channel(q * 4 + 1);
                signed char* ptr2 = top_blob.channel(q * 4 + 2);
                signed char* ptr3 = top_blob.channel(q * 4 + 3);

                float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
                float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
                float32x4_t _bias = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 4);
                // Fold scales together and pre-scale the bias (identity above).
                float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
                _bias = vmulq_f32(_bias, _scale_out);
                float32x4_t _slope = vdupq_n_f32(slope);

                int i = 0;
                for (; i < size; i++)
                {
                    float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
#if __aarch64__
                    _v = vfmaq_f32(_bias, _v, _scale);
#else
                    _v = vmlaq_f32(_bias, _v, _scale);
#endif
                    int8x8_t v = float2int8leakyrelu(_v, _v, _slope);
                    ptr0[0] = vget_lane_s8(v, 0);
                    ptr1[0] = vget_lane_s8(v, 1);
                    ptr2[0] = vget_lane_s8(v, 2);
                    ptr3[0] = vget_lane_s8(v, 3);

                    intptr += 4;
                    ptr0 += 1;
                    ptr1 += 1;
                    ptr2 += 1;
                    ptr3 += 1;
                }
            }
        }
    }
}
|
cell_division_gpu.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#define SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
#include <array>
#include "biodynamo.h"
#include "core/param/command_line_options.h"
#include "core/util/math.h"
#include "core/util/timing.h"
namespace bdm {
// ----------------------------------------------------------------------------
// Starting with 8 cells, we let each cell grow in volume up until a point
// a cell must divide. This tests whether the GPU accelerated mechanical
// interactions properly handle the creation of new cells.
// -----------------------------------------------------------------------------
// Compares each component of `actual` against `expected`; any component
// that deviates by more than 1e-9 sets *wrong and prints a diagnostic.
// *wrong is left untouched when all components are within tolerance.
inline void ExpectArrayNear(const Double3& actual, const Double3& expected,
                            bool* wrong) {
  for (size_t i = 0; i < actual.size(); ++i) {
    double diff = std::fabs(expected[i] - actual[i]);
    if (diff <= 1e-9) {
      continue;
    }
    *wrong = true;
    std::cout << "Wrong result! Expected " << expected[i]
              << ", but instead got " << actual[i]
              << ", which is a difference of " << diff
              << ", which is larger than 1e-9" << std::endl;
  }
}
// Builds a cells_per_dim^3 grid of growing/dividing cells and simulates it
// for `timesteps` steps on the compute target chosen by `mode`.
// `wrong` is meant to be set when result verification fails; verification
// itself is still a TODO at the bottom of this function.
inline void RunTest(bool* wrong, OpComputeTarget mode, uint64_t timesteps,
                    uint64_t cells_per_dim) {
  std::cout << "Running simulation on ";
  // Parameter hook: selects the compute backend before the Simulation is
  // constructed (CPU needs no parameter change).
  auto set_param = [&](auto* param) {
    switch (mode) {
      case kCpu:
        std::cout << "CPU (" << omp_get_max_threads() << " threads)\n";
        break;
      case kOpenCl:
        std::cout << "GPU (OpenCL)\n";
        param->compute_target = "opencl";
        break;
      case kCuda:
        std::cout << "GPU (CUDA)\n";
        param->compute_target = "cuda";
        break;
    }
  };
  Simulation simulation("cell_division_gpu", set_param);

  auto* rm = simulation.GetResourceManager();
  rm->ClearAgents();

  // We need to give every test the same seed for the RNG, because in the cell
  // division, random numbers are used. Within a single executable these numbers
  // vary. Also within the threads this needs to be enforced
#pragma omp parallel
  simulation.GetRandom()->SetSeed(1);

  // Factory for a single cell; the GrowthDivision behavior grows the cell
  // and divides it (threshold diameter 30.05, growth rate 5000).
  auto construct = [](const Double3& position) {
    auto* cell = new Cell(position);
    cell->SetDiameter(30);
    cell->SetAdherence(0.4);
    cell->SetMass(1.0);
    cell->AddBehavior(new GrowthDivision(30.05, 5000));
    return cell;
  };

  // Lay the cells out on a regular 20-unit lattice.
  for (size_t x = 0; x < cells_per_dim; x++) {
    double x_pos = x * 20.0;
    for (size_t y = 0; y < cells_per_dim; y++) {
      double y_pos = y * 20.0;
      for (size_t z = 0; z < cells_per_dim; z++) {
        auto new_simulation_object = construct({x_pos, y_pos, z * 20.0});
        rm->AddAgent(new_simulation_object);
      }
    }
  }

  {
    // Scoped so the timer reports when it is destroyed.
    Timing timer("Execution time");
    simulation.GetScheduler()->Simulate(timesteps);
  }

  // TODO: add verification of results
}
// Parses command-line options and runs the cell-division benchmark on the
// CPU and/or on each GPU backend this binary was built with.
// Returns 0 on success, non-zero if any run flagged a wrong result.
inline int Simulate(int argc, const char** argv) {
  auto options = CommandLineOptions(argc, argv);
  options.AddOption<bool>("verify", "false");
  options.AddOption<uint64_t>("cells-per-dim", "64");
  options.AddOption<uint64_t>("timesteps", "5");

  uint64_t cells_per_dim = options.Get<uint64_t>("cells-per-dim");
  uint64_t timesteps = options.Get<uint64_t>("timesteps");

  // Set to true by RunTest/ExpectArrayNear when a verification check fails.
  // FIX: this must start out false -- the original initialized it to true,
  // which made the return value independent of the actual test outcome.
  bool wrong = false;

  bool is_opencl = options.Get<bool>("opencl");
  bool is_cuda = options.Get<bool>("cuda");

  // TODO(ahmad): after Trello card ("Fix inconsistency in cell state due to
  // direct updates in Biology Modules") enable multithreading, and adjust
  // results if necessary
  // omp_set_num_threads(1);

  if (!is_cuda && !is_opencl) {
    // Run CPU version
    RunTest(&wrong, kCpu, timesteps, cells_per_dim);
  }

#ifdef USE_CUDA
  if (is_cuda) {
    // Run GPU (CUDA) version
    RunTest(&wrong, kCuda, timesteps, cells_per_dim);
  }
#endif  // USE_CUDA

#ifdef USE_OPENCL
  if (is_opencl) {
    // Run GPU (OpenCL) version
    RunTest(&wrong, kOpenCl, timesteps, cells_per_dim);
  }
#endif  // USE_OPENCL

  // Process exit convention: 0 = success, 1 = failure.
  return wrong ? 1 : 0;
}
} // namespace bdm
#endif // SYSTEM_CELL_DIVISION_GPU_SRC_CELL_DIVISION_GPU_H_
|
SybasePROP_fmt_plug.c | /* SybasePROP cracker. Hacked together during November of 2013 by Dhiru Kholia
* <dhiru [at] openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* Frank Benhamou, Gregory Terrien and Marcel Major and it is hereby released
* to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credits for reversing this algorithm go to Marcel Major, Frank Benhamou
* and Gregory Terrien. Dhiru Kholia just glued together the bits (as usual!).
*
* [1] http://www.nes.fr/securitylab/?p=1128 (in French!)
*
* [2] https://hacktivity.com/hu/letoltesek/archivum/57/
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sybaseprop;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sybaseprop);
#else
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "syb-prop_repro.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define BLOCK_SIZE 8
#define FORMAT_LABEL "Sybase-PROP"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "salted FEAL-8 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH (6 + 56)
#define PREFIX_VALUE "0x"
#define PREFIX_LENGTH 2
#define BINARY_SIZE 56 / 2
#define BINARY_ALIGN 4
#define SALT_SIZE 1 // see the definition of generate_hash, note "unsigned char seed" argument
#define SALT_SIZE_HEX 2
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 128
/* Self-test vectors: known Sybase-PROP hashes and their plaintexts,
 * consumed by the JtR self-test/benchmark machinery. */
static struct fmt_tests SybasePROP_tests[] = {
	{"0x2905aeb3d00e3b80fb0695cb34c9fa9080f84ae1824b24cc51a3849dcb06", "test11"},
	{"0x3f05fc3d526946d9936c63dd798c5fa1b980747b1d81d0b9b2e8197d2aca", "test12"},
	{NULL}
};

/* Per-crypt state: the active 1-byte salt, the queued candidate plaintexts,
 * and the hashes computed by crypt_all(). The arrays are allocated in
 * init() and released in done(). */
static unsigned char saved_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* Allocates the per-candidate key and hash buffers; with OpenMP, first
 * scales the keys-per-crypt range so each thread gets OMP_SCALE keys
 * per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	if (omp_t > 1) {
		self->params.min_keys_per_crypt *= omp_t;
		omp_t *= OMP_SCALE;
		self->params.max_keys_per_crypt *= omp_t;
	}
#endif
	/* sized for the (possibly OMP-scaled) maximum number of keys */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Releases the buffers allocated in init(); the two frees are
 * independent, so their order is immaterial. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/* Returns 1 if ciphertext looks like a well-formed Sybase-PROP hash:
 * the literal "0x" prefix followed by exactly CIPHERTEXT_LENGTH-2 hex
 * digits with nothing trailing; returns 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext + PREFIX_LENGTH;
	int extra;

	if (strncmp(ciphertext, PREFIX_VALUE, PREFIX_LENGTH))
		return 0;
	/* hexlenl counts leading hex digits; `extra` flags trailing garbage */
	if (hexlenl(p, &extra) != CIPHERTEXT_LENGTH-PREFIX_LENGTH || extra)
		return 0;

	return 1;
}
/* Decodes the hash portion of the ciphertext from hex into a static,
 * ARCH_WORD-aligned buffer (the union forces the alignment), as the
 * JtR binary() callback convention requires. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	/* skip "0x", the 2 hex digits of salt, then 2 more hex digits */
	p = ciphertext + PREFIX_LENGTH + SALT_SIZE_HEX + 2; // last 2 bytes always seem to be "05"
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}
/* Extracts the 1-byte salt: the first hex byte after the "0x" prefix.
 * Returns a pointer to static storage; the core copies it out via
 * set_salt() before the next call. */
static void *get_salt(char *ciphertext)
{
	char *p = ciphertext + PREFIX_LENGTH;
	static unsigned char salt;

	salt = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
	return (void*)&salt;
}
/* Records the current 1-byte salt for use by crypt_all(). */
static void set_salt(void *salt)
{
	const unsigned char *s = (const unsigned char *)salt;

	saved_salt = *s;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Returns the stored (possibly truncated) candidate plaintext for `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Hashes every queued candidate with the current 1-byte salt into
 * crypt_out[]. Each iteration is independent, so the loop parallelizes
 * trivially under OpenMP. Returns the number of candidates processed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		generate_hash((unsigned char*)saved_key[index], saved_salt,
		              (unsigned char*)crypt_out[index]);
	}
	return count;
}
/* Quick scan: returns 1 if the first ARCH_SIZE bytes of any computed hash
 * match `binary`; cmp_one() then confirms over the full BINARY_SIZE. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width comparison of one candidate's computed hash against `binary`. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one() already compares the full binary. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Partial-hash accessors for the cracker's lookup tables: each masks the
 * first 32 bits of the computed hash to the corresponding table size. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Format descriptor that registers Sybase-PROP with the John the Ripper
 * core (see the FMT_REGISTERS_H stanza at the top of this file). */
struct fmt_main fmt_sybaseprop = {
	{
		/* fmt_params: labels, sizes and capability flags */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ PREFIX_VALUE },
		SybasePROP_tests
	}, {
		/* fmt_methods: callbacks implemented above, with JtR defaults
		 * for the hooks this format does not need */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
project.c | //-----------------------------------------------------------------------------
// project.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/19/14 (Build 5.1.000)
// 04/14/14 (Build 5.1.004)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 04/30/15 (Build 5.1.009)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// Author: L. Rossman
//
// Project management functions.
//
// This module provides project-related services such as:
// o opening a new project and reading its input data
// o allocating and freeing memory for project objects
// o setting default values for object properties and options
// o initializing the internal state of all objects
// o managing hash tables for identifying objects by ID name
//
// Build 5.1.004:
// - Ignore RDII option added.
//
// Build 5.1.007:
// - Default monthly adjustments for climate variables included.
// - User-supplied GW flow equaitions initialized to NULL.
// - Storage node exfiltration object initialized to NULL.
// - Freeing of memory used for storage node exfiltration included.
//
// Build 5.1.008:
// - Constants used for dynamic wave routing moved to dynwave.c.
// - Input processing of minimum time step & number of
// parallel threads for dynamic wave routing added.
// - Default values of hyd. conductivity adjustments added.
// - Freeing of memory used for outfall pollutant load added.
//
// Build 5.1.009:
// - Fixed bug in computing total duration introduced in 5.1.008.
//
// Build 5.1.011:
// - Memory management of hydraulic event dates array added.
//
// Build 5.1.012:
// - Minimum conduit slope option initialized to 0 (none).
// - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED.
//
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <math.h> //(5.1.008)
#include <omp.h> //(5.1.008)
#include "headers.h"
#include "lid.h"
#include "hash.h"
#include "mempool.h"
//-----------------------------------------------------------------------------
// Constants
//-----------------------------------------------------------------------------
//// Constants for DYNWAVE flow routing moved to dynwave.c. //// //(5.1.008)
//-----------------------------------------------------------------------------
// Shared variables
//-----------------------------------------------------------------------------
static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names
static char MemPoolAllocated; // TRUE if memory pool allocated
//-----------------------------------------------------------------------------
// External Functions (declared in funcs.h)
//-----------------------------------------------------------------------------
// project_open (called from swmm_open in swmm5.c)
// project_close (called from swmm_close in swmm5.c)
// project_readInput (called from swmm_open in swmm5.c)
// project_readOption (called from readOption in input.c)
// project_validate (called from swmm_open in swmm5.c)
// project_init (called from swmm_start in swmm5.c)
// project_addObject (called from addObject in input.c)
// project_createMatrix (called from openFileForInput in iface.c)
// project_freeMatrix (called from iface_closeRoutingFiles)
// project_findObject
// project_findID
//-----------------------------------------------------------------------------
// Function declarations
//-----------------------------------------------------------------------------
static void initPointers(void);
static void setDefaults(void);
static void openFiles(char *f1, char *f2, char *f3);
static void createObjects(void);
static void deleteObjects(void);
static void createHashTables(void);
static void deleteHashTables(void);
//=============================================================================
void project_open(char *f1, char *f2, char *f3)
//
//  Input:   f1 = pointer to name of input file
//           f2 = pointer to name of report file
//           f3 = pointer to name of binary output file
//  Output:  none
//  Purpose: opens a new SWMM project.
//
{
    // NOTE(review): the names suggest object pointers are cleared and
    // option defaults set before the project files are opened -- the
    // call order presumably matters; confirm in initPointers/setDefaults.
    initPointers();
    setDefaults();
    openFiles(f1, f2, f3);
}
//=============================================================================
void project_readInput()
//
//  Input:   none
//  Output:  none
//  Purpose: retrieves project data from input file.
//
{
    // --- create hash tables for fast retrieval of objects by ID names
    createHashTables();

    // --- count number of objects in input file and create them
    input_countObjects();
    createObjects();

    // --- read project data from input file
    input_readData();
    if ( ErrorCode ) return;

    // --- establish starting & ending date/time
    //     (report start is clamped so it never precedes the simulation start)
    StartDateTime = StartDate + StartTime;
    EndDateTime = EndDate + EndTime;
    ReportStart = ReportStartDate + ReportStartTime;
    ReportStart = MAX(ReportStart, StartDateTime);

    // --- check for valid starting & ending date/times
    if ( EndDateTime <= StartDateTime )
    {
        report_writeErrorMsg(ERR_START_DATE, "");
    }
    else if ( EndDateTime <= ReportStart )
    {
        report_writeErrorMsg(ERR_REPORT_DATE, "");
    }
    else
    {
////  Following code segment was modified for release 5.1.009.  ////           //(5.1.009)
////
        // --- compute total duration of simulation in seconds
        //     (held in seconds here; converted to msec at the end of block)
        TotalDuration = floor((EndDateTime - StartDateTime) * SECperDAY);

        // --- reporting step must be <= total duration
        if ( (double)ReportStep > TotalDuration )
        {
            ReportStep = (int)(TotalDuration);
        }

        // --- reporting step can't be < routing step
        if ( (double)ReportStep < RouteStep )
        {
            report_writeErrorMsg(ERR_REPORT_STEP, "");
        }

        // --- convert total duration to milliseconds
        TotalDuration *= 1000.0;
    }
////
}
//=============================================================================
void project_validate()
//
//  Input:   none
//  Output:  none
//  Purpose: checks validity of project data.
//
{
    int i;
    int j;
    int err;

    // --- validate Curves and TimeSeries
    for ( i=0; i<Nobjects[CURVE]; i++ )
    {
        err = table_validate(&Curve[i]);
        if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID);
    }
    for ( i=0; i<Nobjects[TSERIES]; i++ )
    {
        err = table_validate(&Tseries[i]);
        if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]);
    }

    // --- validate hydrology objects
    //     (NOTE: order is important !!!!)
    climate_validate();
    lid_validate();
    // --- absent snowmelt/aquifer objects disable those processes entirely
    if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE;
    if ( Nobjects[AQUIFER] == 0 ) IgnoreGwater = TRUE;
    for ( i=0; i<Nobjects[GAGE]; i++ ) gage_validate(i);
    for ( i=0; i<Nobjects[AQUIFER]; i++ ) gwater_validateAquifer(i);
    for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i);
    for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i);

    // --- compute geometry tables for each shape curve
    //     (j indexes the Shape array in step with the shape-type curves)
    j = 0;
    for ( i=0; i<Nobjects[CURVE]; i++ )
    {
        if ( Curve[i].curveType == SHAPE_CURVE )
        {
            Curve[i].refersTo = j;
            Shape[j].curve = i;
            if ( !shape_validate(&Shape[j], &Curve[i]) )
                report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID);
            j++;
        }
    }

    // --- validate links before nodes, since the latter can
    //     result in adjustment of node depths
    for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth;
    for ( i=0; i<Nobjects[LINK]; i++) link_validate(i);
    for ( i=0; i<Nobjects[NODE]; i++) node_validate(i);

    // --- adjust time steps if necessary
    if ( DryStep < WetStep )
    {
        report_writeWarningMsg(WARN06, "");
        DryStep = WetStep;
    }
    if ( RouteStep > (double)WetStep )
    {
        report_writeWarningMsg(WARN07, "");
        RouteStep = WetStep;
    }

    // --- adjust individual reporting flags to match global reporting flag
    if ( RptFlags.subcatchments == ALL )
        for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE;
    if ( RptFlags.nodes == ALL )
        for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE;
    if ( RptFlags.links == ALL )
        for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE;

    // --- validate dynamic wave options
    if ( RouteModel == DW ) dynwave_validate();                                //(5.1.008)

    // --- resolve the thread count for parallel routing: 0 means "use all
    //     available"; never exceed the team size seen inside the region.
    //     NOTE(review): all threads write NumThreads here -- upstream SWMM
    //     code, left as-is.
#pragma omp parallel                                                           //(5.1.008)
{
    if ( NumThreads == 0 ) NumThreads = omp_get_num_threads();                 //(5.1.008)
    else NumThreads = MIN(NumThreads, omp_get_num_threads());                  //(5.1.008)
}
    // --- fall back to a single thread for very small networks
    if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1;                     //(5.1.008)
}
//=============================================================================
void project_close()
//
//  Input:   none
//  Output:  none
//  Purpose: closes a SWMM project.
//
{
    // --- free all project objects, then the ID-name hash tables
    //     that reference them
    deleteObjects();
    deleteHashTables();
}
//=============================================================================
int project_init(void)
//
//  Input:   none
//  Output:  returns an error code
//  Purpose: initializes the internal state of all objects.
//
{
    int j;
    // --- climate and LID state are reset first, then every object of
    //     each type in turn
    climate_initState();
    lid_initState();
    for (j=0; j<Nobjects[TSERIES]; j++) table_tseriesInit(&Tseries[j]);
    for (j=0; j<Nobjects[GAGE]; j++) gage_initState(j);
    for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j);
    for (j=0; j<Nobjects[NODE]; j++) node_initState(j);
    for (j=0; j<Nobjects[LINK]; j++) link_initState(j);
    return ErrorCode;
}
//=============================================================================
int project_addObject(int type, char *id, int n)
//
//  Input:   type = object type
//           id   = object ID string
//           n    = object index
//  Output:  returns 0 if object already added, 1 if not, -1 if hashing fails
//  Purpose: adds an object ID to a hash table
//
{
    int  result;
    int  len;
    char *newID;

    // --- do nothing if object already placed in hash table
    if ( project_findObject(type, id) >= 0 ) return 0;

    // --- use memory from the hash tables' common memory pool to store
    //     a copy of the object's ID string
    //     (pool memory is presumably reclaimed as a whole when the project
    //      closes -- the copy is never freed individually here)
    len = strlen(id) + 1;
    newID = (char *) Alloc(len*sizeof(char));
    strcpy(newID, id);

    // --- insert object's ID into the hash table for that type of object
    //     (a 0 return from HTinsert is mapped to this function's -1
    //      "hashing failed" code)
    result = HTinsert(Htable[type], newID, n);
    if ( result == 0 ) result = -1;
    return result;
}
//=============================================================================
int project_findObject(int type, char *id)
//
//  Input:   type = object type
//           id   = object ID
//  Output:  returns index of object with given ID, or -1 if ID not found
//  Purpose: uses hash table to find index of an object with a given ID.
//
{
    // --- delegate to the hash table built by project_addObject()
    return HTfind(Htable[type], id);
}
//=============================================================================
char *project_findID(int type, char *id)
//
//  Input:   type = object type
//           id   = ID name being sought
//  Output:  returns pointer to location where object's ID string is stored
//  Purpose: uses hash table to find address of given string entry.
//
{
    // --- returns the hash table's own stored key (pool memory), not `id`
    return HTfindKey(Htable[type], id);
}
//=============================================================================
double ** project_createMatrix(int nrows, int ncols)
//
//  Input:   nrows = number of rows (0-based)
//           ncols = number of columns (0-based)
//  Output:  returns a pointer to a matrix, or NULL if memory could not
//           be allocated
//  Purpose: allocates memory for a matrix of doubles.
//
//  NOTE: all nrows*ncols entries are stored in one contiguous block
//        (a[0]) and initialized to 0; release the matrix with
//        project_freeMatrix().
//
{
    int i,j;
    double **a;

    // --- allocate pointers to rows
    a = (double **) malloc(nrows * sizeof(double *));
    if ( !a ) return NULL;

    // --- allocate rows and set pointers to them
    a[0] = (double *) malloc (nrows * ncols * sizeof(double));
    if ( !a[0] )
    {
        // FIX: the row-pointer array was leaked on this failure path
        free(a);
        return NULL;
    }
    for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols;

    // --- zero every entry
    for ( i = 0; i < nrows; i++)
    {
        for ( j = 0; j < ncols; j++) a[i][j] = 0.0;
    }

    // --- return pointer to array of pointers to rows
    return a;
}
//=============================================================================
void project_freeMatrix(double **a)
//
//  Input:   a = matrix of doubles created by project_createMatrix()
//  Output:  none
//  Purpose: frees memory allocated for a matrix of doubles.
//
{
    if ( a == NULL ) return;
    free( a[0] );     // free(NULL) is a no-op, so no inner guard is needed
    free( a );
}
//=============================================================================
int project_readOption(char* s1, char* s2)
//
//  Input:   s1 = option keyword
//           s2 = string representation of option's value
//  Output:  returns error code (0 if successful)
//  Purpose: reads a project option from a pair of string tokens.
//
//  NOTE: all project options have default values assigned in setDefaults().
//
{
    int      k, m, h, s;
    double   tStep;
    char     strDate[25];
    DateTime aTime;
    DateTime aDate;

    // --- determine which option is being read
    k = findmatch(s1, OptionWords);
    if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1);
    switch ( k )
    {
      // --- choice of flow units
      case FLOW_UNITS:
        m = findmatch(s2, FlowUnitWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        FlowUnits = m;
        // flow units at or below MGD imply US units; the rest are metric
        if ( FlowUnits <= MGD ) UnitSystem = US;
        else                    UnitSystem = SI;
        break;

      // --- choice of infiltration modeling method
      case INFIL_MODEL:
        m = findmatch(s2, InfilModelWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        InfilModel = m;
        break;

      // --- choice of flow routing method
      case ROUTE_MODEL:
        m = findmatch(s2, RouteModelWords);
        // fall back to older keyword spellings for backward compatibility
        if ( m < 0 ) m = findmatch(s2, OldRouteModelWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        if ( m == NO_ROUTING ) IgnoreRouting = TRUE;
        else RouteModel = m;
        // the explicit kinematic wave option maps onto the standard KW solver
        if ( RouteModel == EKW ) RouteModel = KW;
        break;

      // --- simulation start date
      case START_DATE:
        if ( !datetime_strToDate(s2, &StartDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation start time of day
      case START_TIME:
        if ( !datetime_strToTime(s2, &StartTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation ending date
      case END_DATE:
        if ( !datetime_strToDate(s2, &EndDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- simulation ending time of day
      case END_TIME:
        if ( !datetime_strToTime(s2, &EndTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- reporting start date
      case REPORT_START_DATE:
        if ( !datetime_strToDate(s2, &ReportStartDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- reporting start time of day
      case REPORT_START_TIME:
        if ( !datetime_strToTime(s2, &ReportStartTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        break;

      // --- day of year when street sweeping begins or when it ends
      //     (year is arbitrarily set to 1947 so that the dayOfYear
      //     function can be applied)
      case SWEEP_START:
      case SWEEP_END:
        // bounded copy so strDate (25 bytes) cannot overflow on a long
        // token: 19 chars + "/1947" (5) + '\0' fills the buffer exactly;
        // an over-long token is truncated and then rejected by
        // datetime_strToDate below
        sstrncpy(strDate, s2, 19);
        strcat(strDate, "/1947");
        if ( !datetime_strToDate(strDate, &aDate) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        m = datetime_dayOfYear(aDate);
        if ( k == SWEEP_START ) SweepStart = m;
        else SweepEnd = m;
        break;

      // --- number of antecedent dry days
      case START_DRY_DAYS:
        StartDryDays = atof(s2);
        if ( StartDryDays < 0.0 )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        break;

      // --- runoff or reporting time steps
      //     (input is in hrs:min:sec format, time step saved as seconds)
      case WET_STEP:
      case DRY_STEP:
      case REPORT_STEP:
        if ( !datetime_strToTime(s2, &aTime) )
        {
            return error_setInpError(ERR_DATETIME, s2);
        }
        datetime_decodeTime(aTime, &h, &m, &s);
        // whole days in the decoded time value count as extra hours
        h += 24*(int)aTime;
        s = s + 60*m + 3600*h;
        if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2);
        switch ( k )
        {
          case WET_STEP:     WetStep = s;     break;
          case DRY_STEP:     DryStep = s;     break;
          case REPORT_STEP:  ReportStep = s;  break;
        }
        break;

      // --- type of damping applied to inertial terms of dynamic wave routing
      case INERT_DAMPING:
        m = findmatch(s2, InertDampingWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        else InertDamping = m;
        break;

      // --- Yes/No options (NO = 0, YES = 1)
      case ALLOW_PONDING:
      case SLOPE_WEIGHTING:
      case SKIP_STEADY_STATE:
      case IGNORE_RAINFALL:
      case IGNORE_SNOWMELT:
      case IGNORE_GWATER:
      case IGNORE_ROUTING:
      case IGNORE_QUALITY:
      case IGNORE_RDII:                                                        //(5.1.004)
        m = findmatch(s2, NoYesWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        switch ( k )
        {
          case ALLOW_PONDING:     AllowPonding    = m;  break;
          case SLOPE_WEIGHTING:   SlopeWeighting  = m;  break;
          case SKIP_STEADY_STATE: SkipSteadyState = m;  break;
          case IGNORE_RAINFALL:   IgnoreRainfall  = m;  break;
          case IGNORE_SNOWMELT:   IgnoreSnowmelt  = m;  break;
          case IGNORE_GWATER:     IgnoreGwater    = m;  break;
          case IGNORE_ROUTING:    IgnoreRouting   = m;  break;
          case IGNORE_QUALITY:    IgnoreQuality   = m;  break;
          case IGNORE_RDII:       IgnoreRDII      = m;  break;                 //(5.1.004)
        }
        break;

      case NORMAL_FLOW_LTD:
        m = findmatch(s2, NormalFlowWords);
        //if ( m < 0 ) m = findmatch(s2, NoYesWords);  DEPRECATED              //(5.1.012)
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        NormalFlowLtd = m;
        break;

      case FORCE_MAIN_EQN:
        m = findmatch(s2, ForceMainEqnWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        ForceMainEqn = m;
        break;

      case LINK_OFFSETS:
        m = findmatch(s2, LinkOffsetWords);
        if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2);
        LinkOffsets = m;
        break;

      // --- compatibility option for selecting solution method for
      //     dynamic wave flow routing (NOT CURRENTLY USED)
      case COMPATIBILITY:
        if      ( strcomp(s2, "3") ) Compatibility = SWMM3;
        else if ( strcomp(s2, "4") ) Compatibility = SWMM4;
        else if ( strcomp(s2, "5") ) Compatibility = SWMM5;
        else return error_setInpError(ERR_KEYWORD, s2);
        break;

      // --- routing or lengthening time step (in decimal seconds)
      //     (lengthening time step is used in Courant stability formula
      //     to artificially lengthen conduits for dynamic wave flow routing
      //     (a value of 0 means that no lengthening is used))
      case ROUTE_STEP:
      case LENGTHENING_STEP:
        // accept either a plain number of seconds or an hrs:min:sec string
        if ( !getDouble(s2, &tStep) )
        {
            if ( !datetime_strToTime(s2, &aTime) )
            {
                return error_setInpError(ERR_NUMBER, s2);
            }
            else
            {
                datetime_decodeTime(aTime, &h, &m, &s);
                h += 24*(int)aTime;
                s = s + 60*m + 3600*h;
                tStep = s;
            }
        }
        if ( k == ROUTE_STEP )
        {
            if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2);
            RouteStep = tStep;
        }
        else LengtheningStep = MAX(0.0, tStep);
        break;

////  Following code section added to release 5.1.008.  ////                   //(5.1.008)
      // --- minimum variable time step for dynamic wave routing
      case MIN_ROUTE_STEP:
        if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 )
            return error_setInpError(ERR_NUMBER, s2);
        break;

      case NUM_THREADS:
        m = atoi(s2);
        if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2);
        NumThreads = m;
        break;
////

      // --- safety factor applied to variable time step estimates under
      //     dynamic wave flow routing (value of 0 indicates that variable
      //     time step option not used)
      case VARIABLE_STEP:
        if ( !getDouble(s2, &CourantFactor) )
            return error_setInpError(ERR_NUMBER, s2);
        if ( CourantFactor < 0.0 || CourantFactor > 2.0 )
            return error_setInpError(ERR_NUMBER, s2);
        break;

      // --- minimum surface area (ft2 or sq. meters) associated with nodes
      //     under dynamic wave flow routing
      case MIN_SURFAREA:
        MinSurfArea = atof(s2);
        break;

      // --- minimum conduit slope (%)
      case MIN_SLOPE:
        if ( !getDouble(s2, &MinSlope) )
            return error_setInpError(ERR_NUMBER, s2);
        if ( MinSlope < 0.0 || MinSlope >= 100 )
            return error_setInpError(ERR_NUMBER, s2);
        // input is a percentage; store as a fraction
        MinSlope /= 100.0;
        break;

      // --- maximum trials / time step for dynamic wave routing
      case MAX_TRIALS:
        m = atoi(s2);
        if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2);
        MaxTrials = m;
        break;

      // --- head convergence tolerance for dynamic wave routing
      case HEAD_TOL:
        if ( !getDouble(s2, &HeadTol) )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        break;

      // --- steady state tolerance on system inflow - outflow
      case SYS_FLOW_TOL:
        if ( !getDouble(s2, &SysFlowTol) )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        // input is a percentage; store as a fraction
        SysFlowTol /= 100.0;
        break;

      // --- steady state tolerance on nodal lateral inflow
      case LAT_FLOW_TOL:
        if ( !getDouble(s2, &LatFlowTol) )
        {
            return error_setInpError(ERR_NUMBER, s2);
        }
        // input is a percentage; store as a fraction
        LatFlowTol /= 100.0;
        break;

      case TEMPDIR: // Temporary Directory
        sstrncpy(TempDir, s2, MAXFNAME);
        break;
    }
    return 0;
}
//=============================================================================
void initPointers()
//
//  Input:   none
//  Output:  none
//  Purpose: assigns NULL to all dynamic arrays for a new project.
//
{
    // --- hydrology objects
    Gage     = NULL;
    Subcatch = NULL;
    Aquifer  = NULL;
    UnitHyd  = NULL;
    Snowmelt = NULL;

    // --- hydraulic network objects
    Node     = NULL;
    Outfall  = NULL;
    Divider  = NULL;
    Storage  = NULL;
    Link     = NULL;
    Conduit  = NULL;
    Pump     = NULL;
    Orifice  = NULL;
    Weir     = NULL;
    Outlet   = NULL;

    // --- water quality objects
    Pollut   = NULL;
    Landuse  = NULL;

    // --- tabular data, shapes & patterns
    Pattern  = NULL;
    Curve    = NULL;
    Tseries  = NULL;
    Transect = NULL;
    Shape    = NULL;

    // --- detailed routing events
    Event    = NULL;                                                           //(5.1.011)

    // --- no object ID memory pool exists yet
    MemPoolAllocated = FALSE;
}
//=============================================================================
void setDefaults()
//
//  Input:   none
//  Output:  none
//  Purpose: assigns default values to project variables.
//
{
    int i, j;

    // Project title & temp. file path
    for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], "");
    strcpy(TempDir, "");

    // Interface files (all closed / unused by default except rainfall,
    // which uses a scratch file)
    Frain.mode      = SCRATCH_FILE;     // Use scratch rainfall file
    Fclimate.mode   = NO_FILE;
    Frunoff.mode    = NO_FILE;
    Frdii.mode      = NO_FILE;
    Fhotstart1.mode = NO_FILE;
    Fhotstart2.mode = NO_FILE;
    Finflows.mode   = NO_FILE;
    Foutflows.mode  = NO_FILE;
    Frain.file      = NULL;
    Fclimate.file   = NULL;
    Frunoff.file    = NULL;
    Frdii.file      = NULL;
    Fhotstart1.file = NULL;
    Fhotstart2.file = NULL;
    Finflows.file   = NULL;
    Foutflows.file  = NULL;
    Fout.file       = NULL;
    Fout.mode       = NO_FILE;

    // Analysis options
    UnitSystem      = US;               // US unit system
    FlowUnits       = CFS;              // CFS flow units
    InfilModel      = HORTON;           // Horton infiltration method
    RouteModel      = KW;               // Kin. wave flow routing method
    AllowPonding    = FALSE;            // No ponding at nodes
    InertDamping    = SOME;             // Partial inertial damping
    NormalFlowLtd   = BOTH;             // Default normal flow limitation
    ForceMainEqn    = H_W;              // Hazen-Williams eqn. for force mains
    LinkOffsets     = DEPTH_OFFSET;     // Use depth for link offsets
    LengtheningStep = 0;                // No lengthening of conduits
    CourantFactor   = 0.0;              // No variable time step
    MinSurfArea     = 0.0;              // Force use of default min. surface area
    MinSlope        = 0.0;              // No user supplied minimum conduit slope //(5.1.012)
    SkipSteadyState = FALSE;            // Do flow routing in steady state periods
    IgnoreRainfall  = FALSE;            // Analyze rainfall/runoff
    IgnoreRDII      = FALSE;            // Analyze RDII                         //(5.1.004)
    IgnoreSnowmelt  = FALSE;            // Analyze snowmelt
    IgnoreGwater    = FALSE;            // Analyze groundwater
    IgnoreRouting   = FALSE;            // Analyze flow routing
    IgnoreQuality   = FALSE;            // Analyze water quality
    WetStep         = 300;              // Runoff wet time step (secs)
    DryStep         = 3600;             // Runoff dry time step (secs)
    RouteStep       = 300.0;            // Routing time step (secs)
    MinRouteStep    = 0.5;              // Minimum variable time step (sec)     //(5.1.008)
    ReportStep      = 900;              // Reporting time step (secs)
    StartDryDays    = 0.0;              // Antecedent dry days
    MaxTrials       = 0;                // Force use of default max. trials
    HeadTol         = 0.0;              // Force use of default head tolerance
    SysFlowTol      = 0.05;             // System flow tolerance for steady state
    LatFlowTol      = 0.05;             // Lateral flow tolerance for steady state
    NumThreads      = 0;                // Number of parallel threads to use
    NumEvents       = 0;                // Number of detailed routing events    //(5.1.011)

    // Deprecated options
    SlopeWeighting  = TRUE;             // Use slope weighting
    Compatibility   = SWMM4;            // Use SWMM 4 up/dn weighting method

    // Starting & ending date/time
    StartDate       = datetime_encodeDate(2004, 1, 1);
    StartTime       = datetime_encodeTime(0,0,0);
    StartDateTime   = StartDate + StartTime;
    EndDate         = StartDate;
    EndTime         = 0.0;
    ReportStartDate = NO_DATE;          // NO_DATE => report from sim. start
    ReportStartTime = NO_DATE;
    SweepStart      = 1;                // Sweeping all year by default
    SweepEnd        = 365;

    // Reporting options
    RptFlags.input         = FALSE;
    RptFlags.continuity    = TRUE;
    RptFlags.flowStats     = TRUE;
    RptFlags.controls      = FALSE;
    RptFlags.subcatchments = FALSE;
    RptFlags.nodes         = FALSE;
    RptFlags.links         = FALSE;
    RptFlags.nodeStats     = FALSE;

    // Temperature data
    Temp.dataSource  = NO_TEMP;
    Temp.tSeries     = -1;              // -1 => no time series assigned
    Temp.ta          = 70.0;            // default air temp (deg F assumed -- confirm)
    Temp.elev        = 0.0;
    Temp.anglat      = 40.0;            // latitude in degrees -- presumably
    Temp.dtlong      = 0.0;
    Temp.tmax        = MISSING;

    // Wind speed data
    Wind.type = MONTHLY_WIND;
    for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0;

    // Snowmelt parameters
    Snow.snotmp      = 34.0;
    Snow.tipm        = 0.5;
    Snow.rnm         = 0.6;

    // Snow areal depletion curves for pervious and impervious surfaces
    // (all ones => no areal depletion effect)
    for ( i=0; i<2; i++ )
    {
        for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0;
    }

    // Evaporation rates
    Evap.type = CONSTANT_EVAP;
    for (i=0; i<12; i++)
    {
        Evap.monthlyEvap[i] = 0.0;
        Evap.panCoeff[i]    = 1.0;
    }
    Evap.recoveryPattern = -1;          // -1 => no time pattern assigned
    Evap.recoveryFactor  = 1.0;
    Evap.tSeries = -1;
    Evap.dryOnly = FALSE;

////  Following code segment added to release 5.1.007.  ////                   //(5.1.007)
////
    // Climate adjustments (identity values => no adjustment)
    for (i = 0; i < 12; i++)
    {
        Adjust.temp[i] = 0.0;           // additive adjustments
        Adjust.evap[i] = 0.0;           // additive adjustments
        Adjust.rain[i] = 1.0;           // multiplicative adjustments
        Adjust.hydcon[i] = 1.0;         // hyd. conductivity adjustments       //(5.1.008)
    }
    Adjust.rainFactor = 1.0;
    Adjust.hydconFactor = 1.0;                                                 //(5.1.008)
////
}
//=============================================================================
void openFiles(char *f1, char *f2, char *f3)
//
//  Input:   f1 = name of input file
//           f2 = name of report file
//           f3 = name of binary output file
//  Output:  none (sets ErrorCode on failure)
//  Purpose: opens a project's input and report files.
//
{
    // --- no files are open yet
    Finp.file = NULL;
    Frpt.file = NULL;
    Fout.file = NULL;

    // --- remember the supplied file names
    sstrncpy(Finp.name, f1, MAXFNAME);
    sstrncpy(Frpt.name, f2, MAXFNAME);
    sstrncpy(Fout.name, f3, MAXFNAME);

    // --- no two of the three names may refer to the same file
    if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3))
    {
        writecon(FMT11);
        ErrorCode = ERR_FILE_NAME;
        return;
    }

    // --- open the input file for reading
    Finp.file = fopen(f1, "rt");
    if ( Finp.file == NULL )
    {
        writecon(FMT12);
        writecon(f1);
        ErrorCode = ERR_INP_FILE;
        return;
    }

    // --- open the report file for writing
    Frpt.file = fopen(f2, "wt");
    if ( Frpt.file == NULL )
    {
        writecon(FMT13);
        ErrorCode = ERR_RPT_FILE;
    }
}
//=============================================================================
void createObjects()
//
//  Input:   none
//  Output:  none
//  Purpose: allocates memory for project's objects.
//
//  NOTE: number of each type of object has already been determined in
//        project_readInput().
//
{
    int j, k;

    // --- allocate memory for each category of object
    if ( ErrorCode ) return;
    // NOTE(review): the calloc results below are not checked for NULL;
    // a failed allocation would surface later as a crash rather than
    // an ERR_MEMORY report -- confirm this matches project policy.
    Gage     = (TGage *)     calloc(Nobjects[GAGE],     sizeof(TGage));
    Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch));
    Node     = (TNode *)     calloc(Nobjects[NODE],     sizeof(TNode));
    Outfall  = (TOutfall *)  calloc(Nnodes[OUTFALL],    sizeof(TOutfall));
    Divider  = (TDivider *)  calloc(Nnodes[DIVIDER],    sizeof(TDivider));
    Storage  = (TStorage *)  calloc(Nnodes[STORAGE],    sizeof(TStorage));
    Link     = (TLink *)     calloc(Nobjects[LINK],     sizeof(TLink));
    Conduit  = (TConduit *)  calloc(Nlinks[CONDUIT],    sizeof(TConduit));
    Pump     = (TPump *)     calloc(Nlinks[PUMP],       sizeof(TPump));
    Orifice  = (TOrifice *)  calloc(Nlinks[ORIFICE],    sizeof(TOrifice));
    Weir     = (TWeir *)     calloc(Nlinks[WEIR],       sizeof(TWeir));
    Outlet   = (TOutlet *)   calloc(Nlinks[OUTLET],     sizeof(TOutlet));
    Pollut   = (TPollut *)   calloc(Nobjects[POLLUT],   sizeof(TPollut));
    Landuse  = (TLanduse *)  calloc(Nobjects[LANDUSE],  sizeof(TLanduse));
    Pattern  = (TPattern *)  calloc(Nobjects[TIMEPATTERN], sizeof(TPattern));
    Curve    = (TTable *)    calloc(Nobjects[CURVE],    sizeof(TTable));
    Tseries  = (TTable *)    calloc(Nobjects[TSERIES],  sizeof(TTable));
    Aquifer  = (TAquifer *)  calloc(Nobjects[AQUIFER],  sizeof(TAquifer));
    UnitHyd  = (TUnitHyd *)  calloc(Nobjects[UNITHYD],  sizeof(TUnitHyd));
    Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt));
    Shape    = (TShape *)    calloc(Nobjects[SHAPE],    sizeof(TShape));

////  Added to release 5.1.011.  ////                                          //(5.1.011)
    // --- create array of detailed routing event periods
    //     (one extra slot holds a sentinel event starting at "infinity")
    Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent));
    Event[NumEvents].start = BIG;
    Event[NumEvents].end = BIG + 1.0;
////

    // --- create LID objects
    lid_create(Nobjects[LID], Nobjects[SUBCATCH]);

    // --- create control rules
    ErrorCode = controls_create(Nobjects[CONTROL]);
    if ( ErrorCode ) return;

    // --- create cross section transects
    ErrorCode = transect_create(Nobjects[TRANSECT]);
    if ( ErrorCode ) return;

    // --- allocate memory for infiltration data
    infil_create(Nobjects[SUBCATCH], InfilModel);

    // --- allocate memory for water quality state variables
    //     (one value per pollutant for each subcatchment, node & link)
    for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        Subcatch[j].initBuildup =
                              (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Subcatch[j].totalLoad  = (double *) calloc(Nobjects[POLLUT], sizeof(double));
    }
    for (j = 0; j < Nobjects[NODE]; j++)
    {
        Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        // inflow/treatment objects are attached later as input is parsed
        Node[j].extInflow = NULL;
        Node[j].dwfInflow = NULL;
        Node[j].rdiiInflow = NULL;
        Node[j].treatment = NULL;
    }
    for (j = 0; j < Nobjects[LINK]; j++)
    {
        Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double));
        Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double));
    }

    // --- allocate memory for land use buildup/washoff functions
    for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        Landuse[j].buildupFunc =
            (TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup));
        Landuse[j].washoffFunc =
            (TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff));
    }

    // --- allocate memory for subcatchment landuse factors
    for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        Subcatch[j].landFactor =
            (TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor));
        for (k = 0; k < Nobjects[LANDUSE]; k++)
        {
            Subcatch[j].landFactor[k].buildup =
                (double *) calloc(Nobjects[POLLUT], sizeof(double));
        }
    }

    // --- initialize buildup & washoff functions
    for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        for (k = 0; k < Nobjects[POLLUT]; k++)
        {
            Landuse[j].buildupFunc[k].funcType = NO_BUILDUP;
            Landuse[j].buildupFunc[k].normalizer = PER_AREA;
            Landuse[j].washoffFunc[k].funcType = NO_WASHOFF;
        }
    }

    // --- initialize rain gage properties
    for (j = 0; j < Nobjects[GAGE]; j++)
    {
        Gage[j].tSeries = -1;           // -1 => no time series assigned
        strcpy(Gage[j].fname, "");
    }

    // --- initialize subcatchment properties
    for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        Subcatch[j].outSubcatch = -1;   // -1 => no outlet assigned yet
        Subcatch[j].outNode     = -1;
        Subcatch[j].infil       = -1;
        Subcatch[j].groundwater = NULL;
        Subcatch[j].gwLatFlowExpr = NULL;                                      //(5.1.007)
        Subcatch[j].gwDeepFlowExpr = NULL;                                     //(5.1.007)
        Subcatch[j].snowpack    = NULL;
        Subcatch[j].lidArea     = 0.0;
        for (k = 0; k < Nobjects[POLLUT]; k++)
        {
            Subcatch[j].initBuildup[k] = 0.0;
        }
    }

    // --- initialize RDII unit hydrograph properties
    for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j);

    // --- initialize snowmelt properties
    for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j);

    // --- initialize storage node exfiltration                                //(5.1.007)
    for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL;             //(5.1.007)

    // --- initialize link properties
    for (j = 0; j < Nobjects[LINK]; j++)
    {
        Link[j].xsect.type   = -1;      // -1 => no cross section assigned
        Link[j].cLossInlet   = 0.0;
        Link[j].cLossOutlet  = 0.0;
        Link[j].cLossAvg     = 0.0;
        Link[j].hasFlapGate  = FALSE;
    }
    for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve  = -1;

    // --- initialize reporting flags
    for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE;
    for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE;
    for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE;

    // --- initialize curves, time series, and time patterns
    for (j = 0; j < Nobjects[CURVE]; j++)   table_init(&Curve[j]);
    for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]);
    for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j);
}
//=============================================================================
void deleteObjects()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for a project's objects.
//
//  NOTE: care is taken to first free objects that are properties of another
//        object before the latter is freed (e.g., we must free a
//        subcatchment's land use factors before freeing the subcatchment).
//
{
    int j, k;

    // --- free memory for landuse factors & groundwater
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        for (k = 0; k < Nobjects[LANDUSE]; k++)
        {
            FREE(Subcatch[j].landFactor[k].buildup);
        }
        FREE(Subcatch[j].landFactor);
        FREE(Subcatch[j].groundwater);
        gwater_deleteFlowExpression(j);
        FREE(Subcatch[j].snowpack);
    }

    // --- free memory for buildup/washoff functions
    if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++)
    {
        FREE(Landuse[j].buildupFunc);
        FREE(Landuse[j].washoffFunc);   // ';' added: previously only compiled
                                        // because FREE expands to a brace block
    }

    // --- free memory for water quality state variables
    if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++)
    {
        FREE(Subcatch[j].initBuildup);
        FREE(Subcatch[j].oldQual);
        FREE(Subcatch[j].newQual);
        FREE(Subcatch[j].pondedQual);
        FREE(Subcatch[j].totalLoad);
    }
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        FREE(Node[j].oldQual);
        FREE(Node[j].newQual);
    }
    if ( Link ) for (j = 0; j < Nobjects[LINK]; j++)
    {
        FREE(Link[j].oldQual);
        FREE(Link[j].newQual);
        FREE(Link[j].totalLoad);
    }

    // --- free memory used for rainfall infiltration
    infil_delete();

////  Added for release 5.1.007.  ////                                         //(5.1.007)
////
    // --- free memory used for storage exfiltration
    //     (guard on Storage -- the array actually indexed -- rather than
    //      on Node, so a partially-created project cannot dereference a
    //      NULL Storage array)
    if ( Storage ) for (j = 0; j < Nnodes[STORAGE]; j++)
    {
        if ( Storage[j].exfil )
        {
            FREE(Storage[j].exfil->btmExfil);
            FREE(Storage[j].exfil->bankExfil);
            FREE(Storage[j].exfil);
        }
    }
////

    // --- free memory used for outfall pollutants loads                       //(5.1.008)
    //     (same fix: guard on Outfall, the array being indexed)
    if ( Outfall ) for (j = 0; j < Nnodes[OUTFALL]; j++)                       //(5.1.008)
        FREE(Outfall[j].wRouted);                                              //(5.1.008)

    // --- free memory used for nodal inflows & treatment functions
    if ( Node ) for (j = 0; j < Nobjects[NODE]; j++)
    {
        inflow_deleteExtInflows(j);
        inflow_deleteDwfInflows(j);
        rdii_deleteRdiiInflow(j);
        treatmnt_delete(j);
    }

    // --- delete table entries for curves and time series
    if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++)
        table_deleteEntries(&Tseries[j]);
    if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++)
        table_deleteEntries(&Curve[j]);

    // --- delete cross section transects
    transect_delete();

    // --- delete control rules
    controls_delete();

    // --- delete LIDs
    lid_delete();

    // --- now free each major category of object
    FREE(Gage);
    FREE(Subcatch);
    FREE(Node);
    FREE(Outfall);
    FREE(Divider);
    FREE(Storage);
    FREE(Link);
    FREE(Conduit);
    FREE(Pump);
    FREE(Orifice);
    FREE(Weir);
    FREE(Outlet);
    FREE(Pollut);
    FREE(Landuse);
    FREE(Pattern);
    FREE(Curve);
    FREE(Tseries);
    FREE(Aquifer);
    FREE(UnitHyd);
    FREE(Snowmelt);
    FREE(Shape);
    FREE(Event);                                                               //(5.1.011)
}
//=============================================================================
void createHashTables()
//
//  Input:   none
//  Output:  none (reports ERR_MEMORY on allocation failure)
//  Purpose: allocates memory for object ID hash tables
//
{
    int j;

    MemPoolAllocated = FALSE;

    // --- one hash table per object category
    for (j = 0; j < MAX_OBJ_TYPES; j++)
    {
        Htable[j] = HTcreate();
        if ( !Htable[j] ) report_writeErrorMsg(ERR_MEMORY, "");
    }

    // --- create the memory pool used to store object ID strings
    if ( AllocInit() != NULL ) MemPoolAllocated = TRUE;
    else                       report_writeErrorMsg(ERR_MEMORY, "");
}
//=============================================================================
void deleteHashTables()
//
//  Input:   none
//  Output:  none
//  Purpose: frees memory allocated for object ID hash tables
//
{
    int j;

    // --- release each per-category hash table that was created
    for (j = 0; j < MAX_OBJ_TYPES; j++)
    {
        if ( Htable[j] ) HTfree(Htable[j]);
    }

    // --- release the object ID string pool, if one was allocated
    if ( MemPoolAllocated ) AllocFreePool();
}
//=============================================================================
|
GB_binop__div_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint64)
// A*D function (colscale): GB (_AxD__div_uint64)
// D*A function (rowscale): GB (_DxB__div_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint64)
// C=scalar+B GB (_bind1st__div_uint64)
// C=scalar+B' GB (_bind1st_tran__div_uint64)
// C=A+scalar GB (_bind2nd__div_uint64)
// C=A'+scalar GB (_bind2nd_tran__div_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64)
// Type and operator macros expanded by the shared kernel templates for the
// DIV operator on uint64_t.
#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (stray trailing '\' removed: it spliced the following comment line into
//  the macro definition, which only worked because line splicing happens
//  before comment removal)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (same stray trailing '\' removed here)
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = x / y via the unsigned integer division helper
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_UNSIGNED (x, y, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C += A+B where all three matrices are dense; the numerical work is
    // done by the shared template, which expands the GB_BINOP macro
    // (uint64_t division) defined above.
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B (no accumulation) where all three matrices are dense;
    // work is done by the shared template using GB_BINOP above.
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: signal the caller to fall back to the
    // generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        // C += B, accumulating a sparse B into a dense C; the parallel
        // work is partitioned by the B_ek_slicing task decomposition
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); kept as emitted by
    // the code generator -- this file is auto-generated, do not edit
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // column scale: C = A*D with diagonal D; write results directly
    // into C's value array
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // row scale: C = D*B with diagonal D; write results directly
    // into C's value array
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    // for eWiseUnion, alpha/beta substitute for entries present in only
    // one of A or B; they are only read when is_eWiseUnion is true
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__div_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult with sparse/hypersparse C; all mask variants are handled
    // by the shared meta template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for this operator, so this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult with sparse/hyper mask M and bitmap/full A and B;
    // work is partitioned over the M_ek_slicing task decomposition
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // eWiseMult where the result C is held in bitmap form
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = x / Bx [p]: the bound scalar x is the numerator
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from B's bitmap
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        // NOTE(review): GB_IDIV_UNSIGNED presumably guards against a zero
        // denominator -- defined elsewhere in GB.h; confirm before relying
        // on division-by-zero behavior here
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = div (Ax [p], y) for every entry present in A (bind the scalar y
// as the second operand).  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__div_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            uint64_t aij = GBX (Ax, p, false) ;
            Cx [p] = GB_IDIV_UNSIGNED (aij, y, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ; \
}

// C = op (x, A'): transpose A and apply z = div (x, aij), with the scalar x
// bound as the first operand.  The kernel body comes from the included
// transpose template, driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind1st_tran__div_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // NOTE(review): the redefinition below is unreachable boilerplate from
    // the code generator; it has no effect on this function.
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ; \
}

// C = op (A', y): transpose A and apply z = div (aij, y), with the scalar y
// bound as the second operand.  The kernel body comes from the included
// transpose template, driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind2nd_tran__div_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB067-restrictpointer1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
restrict pointers: no aliasing
Array initialization using assignments.
C99 is needed to compile this code
e.g. gcc -std=c99 -c Stress-1.c
*/
#include <stdio.h>
#include <stdlib.h>
typedef double real8;

/* Zero-fill two arrays in parallel.  The restrict qualifiers promise the
 * two arrays do not alias, so the loop iterations are independent and the
 * parallel for is race-free. */
void foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)
{
  int i;
#pragma omp parallel for private(i)
  for (i = 0; i < length; i++) {
    newSxx[i] = 0.0;
    newSyy[i] = 0.0;
  }
}
/* Print the two arrays side by side, one "sxx syy" pair per line. */
void print(real8 * restrict newSxx, real8 * restrict newSyy, int length)
{
  int i;
  for (i = 0; i < length; i++) {
    printf("%lf %lf\n", newSxx[i], newSyy[i]);
  }
}
/* Allocate two arrays, zero them in parallel via foo, print them, clean up.
 * Fixed: malloc results were previously used unchecked; a failed allocation
 * would have crashed inside foo. */
int main()
{
  int length = 1000;
  real8 *newSxx = malloc(length * sizeof(real8));
  real8 *newSyy = malloc(length * sizeof(real8));
  if (newSxx == NULL || newSyy == NULL) {
    free(newSxx); /* free(NULL) is a no-op */
    free(newSyy);
    return EXIT_FAILURE;
  }
  foo(newSxx, newSyy, length);
  print(newSxx, newSyy, length);
  free(newSxx);
  free(newSyy);
  return 0;
}
|
Example_task_dep.2.c | /*
* @@name: task_dep.2c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
int main()
{
    int x = 1;
    #pragma omp parallel
    #pragma omp single
    {
        // The second task has an "out" dependence on x, so it is ordered
        // after the previously generated sibling task with an "in"
        // dependence on x: this always prints x = 1.
        #pragma omp task shared(x) depend(in: x)
        printf("x = %d\n", x);
        #pragma omp task shared(x) depend(out: x)
        x = 2;
    }
    return 0;
}
|
dd_glpk.h | // Header for LP computation of the temporal upper bound using GLPK.
#pragma once
#include <glpk.h>

#include <cmath>
#include <cstdint>
#include <initializer_list>
#include <map>
#include <random>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
const int64_t MAX_CONSTRAINTS = 5000000000L;
// Build a row/column name like "T_{1,2,3}"; a prefix with no vertices is
// returned unchanged (e.g. "D").
std::string LP_name(const std::string &prefix, const std::initializer_list<int> &vertices) {
  std::string name = prefix;
  if (vertices.size() > 0) {
    name += "_{";
    bool first = true;
    for (const auto &v : vertices) {
      if (!first) {
        name += ",";
      }
      name += std::to_string(v);
      first = false;
    }
    name += "}";
  }
  return name;
}
// 0-based column index of variable y_{uv} in the flattened
// (n - n0) x (n - n0) grid (row-major, offset by n0).
inline int LP_get_variable_index(const int &u, const int &v, const int &n, const int &n0) {
  const int side = n - n0;
  return (u - n0) * side + (v - n0);
}
// 0-based column index of the pair variable w_{(u,i),(v,j)}: the w block
// starts right after the (n - n0)^2 y variables.
inline int LP_get_variable_index(
    const int &u, const int &i, const int &v, const int &j, const int &n, const int &n0) {
  const int side_sq = (n - n0) * (n - n0);
  const int first = LP_get_variable_index(u, i, n, n0);
  const int second = LP_get_variable_index(v, j, n, n0);
  return side_sq + first * side_sq + second;
}
// Append one transitivity row y_ij + y_jk - y_ik to the sparse triplet
// lists (X = row indices, Y = column indices, A = coefficients).  The row
// bounds are set by the caller; `row` itself is not advanced here.
inline void add_transitivity_constraint(
    glp_prob *LP, std::vector<int> &X, std::vector<int> &Y, std::vector<double> &A, int &row,
    const int &n, const int &n0, const int &i, const int &j, const int &k) {
  glp_add_rows(LP, 1);
  glp_set_row_name(LP, row, LP_name("T", {i, j, k}).c_str());
  const int cols[3] = {
      LP_get_variable_index(i, j, n, n0) + 1,
      LP_get_variable_index(j, k, n, n0) + 1,
      LP_get_variable_index(i, k, n, n0) + 1,
  };
  const double coefs[3] = {1, 1, -1};
  for (int t = 0; t < 3; t++) {
    X.push_back(row);
    Y.push_back(cols[t]);
    A.push_back(coefs[t]);
  }
}
// Append the density row: sum over all i != j of y_ij <= density, then
// advance `row`.
inline void add_density_constraint(
    glp_prob *LP, std::vector<int> &X, std::vector<int> &Y, std::vector<double> &A, int &row,
    const double &density, const int &n, const int &n0) {
  glp_add_rows(LP, 1);
  glp_set_row_name(LP, row, LP_name("D", {}).c_str());
  for (int i = n0; i < n; i++) {
    for (int j = n0; j < n; j++) {
      if (i == j) {
        continue;
      }
      X.push_back(row);
      Y.push_back(LP_get_variable_index(i, j, n, n0) + 1);
      A.push_back(1);
    }
  }
  // GLP_UP uses only the upper bound; the lb argument is ignored by GLPK.
  glp_set_row_bnds(LP, row, GLP_UP, density, density);
  row++;
}
// Read back the primal y_{ij} values from a solved problem and rescale each
// by 1/s (s is the LP scaling variable's value).
std::map<std::pair<int, int>, double> retrieve_solution(
    glp_prob *LP, const int &n, const int &n0, const double &s) {
  std::map<std::pair<int, int>, double> result;
  for (int i = n0; i < n; i++) {
    for (int j = n0; j < n; j++) {
      if (i != j) {
        const double y_ij = glp_get_col_prim(LP, LP_get_variable_index(i, j, n, n0) + 1);
        result.emplace(std::make_pair(i, j), y_ij / s);
      }
    }
  }
  return result;
}
// Solve the LP relaxation of the ordering bound: maximize sum p_uv * y_uv
// subject to antisymmetry, transitivity and a density constraint, with an
// auxiliary scaling variable s.  Returns the objective and, optionally,
// the y solution rescaled by 1/s.
std::tuple<double, std::map<std::pair<int, int>, double>> LP_ordering_solve(
    const std::map<std::pair<int, int>, long double> &p_uv, const int &n, const int &n0,
    const double &epsilon, const bool get_solution = false) {
  glp_prob *LP = glp_create_prob();
  glp_set_prob_name(LP, ("Solve " + std::to_string(epsilon)).c_str());
  glp_set_obj_dir(LP, GLP_MAX);
  double density = epsilon * (n - n0) * (n - n0 - 1) / 2;
  // Objective function: one column per ordered pair (i, j), plus column s.
  glp_add_cols(LP, (n - n0) * (n - n0) + 1);
  int s_index = (n - n0) * (n - n0);  // 0-based index of the s column
  for (int i = n0; i < n; i++) {
    for (int j = n0; j < n; j++) {
      auto index = LP_get_variable_index(i, j, n, n0);
      glp_set_col_name(LP, index + 1, LP_name("y", {i, j}).c_str());
      if (i != j) {
        // Pairs missing from p_uv get objective coefficient 0.
        const auto &p_ij = p_uv.find(std::make_pair(i, j));
        glp_set_col_bnds(LP, index + 1, GLP_LO, 0.0, 1.0);
        glp_set_obj_coef(
            LP, index + 1, p_ij != p_uv.end() ? static_cast<double>(p_ij->second) : 0.0);
      }
    }
  }
  glp_set_col_name(LP, s_index + 1, "s");
  glp_set_col_bnds(LP, s_index + 1, GLP_DB, 0.0, 1 / density);
  // Constraint matrix is accumulated as triplets (X = row, Y = col, A = val).
  std::vector<int> X, Y;
  std::vector<double> A;
  int row = 1;
  glp_add_rows(LP, (n - n0) * (n - n0 - 1) / 2);
  // Antisymmetry: y_ij + y_ji <= s.
  // NOTE(review): the loop body is one big critical section, so this
  // parallel for is effectively serialized; row numbers are assigned in a
  // nondeterministic (but internally consistent) order across threads.
  #pragma omp parallel for
  for (int i = n0; i < n; i++) {
    for (int j = i + 1; j < n; j++) {
      #pragma omp critical
      {
        glp_set_row_name(LP, row, LP_name("A", {i, j}).c_str());
        X.push_back(row), Y.push_back(LP_get_variable_index(i, j, n, n0) + 1), A.push_back(1);
        X.push_back(row), Y.push_back(LP_get_variable_index(j, i, n, n0) + 1), A.push_back(1);
        X.push_back(row), Y.push_back(s_index + 1), A.push_back(-1);
        glp_set_row_bnds(LP, row, GLP_UP, 0.0, 0.0), row++;
      }
    }
  }
  // Transitivity: all O((n-n0)^3) constraints if they fit under
  // MAX_CONSTRAINTS, otherwise a random sample of MAX_CONSTRAINTS of them.
  if (MAX_CONSTRAINTS >= pow(n - n0, 3.0)) {
    #pragma omp parallel for
    for (int i = n0; i < n; i++) {
      for (int j = n0; j < n; j++) {
        for (int k = n0; k < n; k++) {
          if (i != j && j != k && i != k) {
            #pragma omp critical
            {
              add_transitivity_constraint(LP, X, Y, A, row, n, n0, i, j, k);
              X.push_back(row), Y.push_back(s_index + 1), A.push_back(-1);
              glp_set_row_bnds(LP, row, GLP_UP, 0.0, 0.0), row++;
            }
          }
        }
      }
    }
  } else {
    std::random_device device;
    std::mt19937 generator(device());
    std::uniform_int_distribution<int> index_distribution(n0, n - 1);
    // NOTE(review): `generator` is shared across threads and invoked
    // outside the critical section — a data race on the RNG state; confirm
    // whether per-thread generators were intended.
    #pragma omp parallel for
    for (int64_t constraint = 0; constraint < MAX_CONSTRAINTS; constraint++) {
      int i = index_distribution(generator), j = index_distribution(generator),
          k = index_distribution(generator);
      if (i == j || j == k || i == k) {
        continue;
      }
      #pragma omp critical
      {
        add_transitivity_constraint(LP, X, Y, A, row, n, n0, i, j, k);
        X.push_back(row), Y.push_back(s_index + 1), A.push_back(-1);
        glp_set_row_bnds(LP, row, GLP_UP, 0.0, 0.0), row++;
      }
    }
  }
  // Density
  add_density_constraint(LP, X, Y, A, row, 1.0, n, n0);
  // GLPK expects 1-based arrays: &X[0] - 1 shifts the triplet arrays so
  // index 1 maps to the first element (standard GLPK idiom, though forming
  // the pre-begin pointer is technically UB).
  glp_load_matrix(LP, A.size(), &X[0] - 1, &Y[0] - 1, &A[0] - 1);
  glp_term_out(0);  // silence GLPK terminal output
  glp_simplex(LP, NULL);
  double objective = glp_get_obj_val(LP);
  std::map<std::pair<int, int>, double> solution;
  if (get_solution) {
    solution = retrieve_solution(LP, n, n0, glp_get_col_prim(LP, s_index + 1));
  }
  glp_delete_prob(LP);
  glp_free_env();
  return std::make_tuple(objective, solution);
}
// Solve the LP bound for the binning variant: y_{u,i} assigns vertex u to
// position i, and w_{(u,i),(v,j)} couples pairs of assignments.  Maximizes
// the expected agreement with p_uv subject to identity, symmetry and
// density constraints.  The solution map is always returned empty.
std::tuple<double, std::map<std::pair<int, int>, double>> LP_binning_solve(
    const std::map<std::pair<int, int>, long double> &p_uv, const int &n, const int &n0,
    const double &epsilon, const bool get_solution = false) {
  (void)get_solution;  // solution retrieval is not implemented for this variant
  glp_prob *LP = glp_create_prob();
  glp_set_prob_name(LP, ("Solve " + std::to_string(epsilon)).c_str());
  glp_set_obj_dir(LP, GLP_MAX);
  double density = epsilon * (n - n0) * (n - n0 - 1) / 2;
  // Objective function: (n-n0)^2 y columns, (n-n0)^4 w columns, plus s.
  int var_count = pow(n - n0, 4.0) + pow(n - n0, 2.0) + 1;
  glp_add_cols(LP, var_count);
  for (int u = n0; u < n; u++) {
    for (int i = n0; i < n; i++) {
      auto index = LP_get_variable_index(u, i, n, n0);
      glp_set_col_name(LP, index + 1, LP_name("y", {u, i}).c_str());
      glp_set_col_bnds(LP, index + 1, GLP_LO, 0.0, 1.0);
    }
  }
  for (int u = n0; u < n; u++) {
    for (int i = n0; i < n; i++) {
      for (int v = n0; v < n; v++) {
        for (int j = n0; j < n; j++) {
          auto index = LP_get_variable_index(u, i, v, j, n, n0);
          glp_set_col_name(LP, index + 1, LP_name("w", {u, i, v, j}).c_str());
          glp_set_col_bnds(LP, index + 1, GLP_LO, 0.0, 1.0);
          // Only u != v with i < j contributes to the objective.
          if (u != v && i < j) {
            const auto &p_ij = p_uv.find(std::make_pair(u, v));
            glp_set_obj_coef(
                LP, index + 1, (p_ij != p_uv.end()) ? static_cast<double>(p_ij->second) : 0.0);
          }
        }
      }
    }
  }
  int s_index = var_count - 1;  // 0-based index of the s column (last one)
  glp_set_col_name(LP, s_index + 1, "s");
  glp_set_col_bnds(LP, s_index + 1, GLP_DB, 0.0, 1 / density);
  // Constraint matrix accumulated as triplets (X = row, Y = col, A = val).
  std::vector<int> X, Y;
  std::vector<double> A;
  int row = 1;
  // Identity: w_{(u,i),(u,i)} = y_{u,i}.
  glp_add_rows(LP, (n - n0) * (n - n0));
  for (int u = n0; u < n; u++) {
    for (int i = n0; i < n; i++) {
      glp_set_row_name(LP, row, LP_name("I", {u, i}).c_str());
      X.push_back(row), Y.push_back(LP_get_variable_index(u, i, n, n0) + 1), A.push_back(1);
      X.push_back(row), Y.push_back(LP_get_variable_index(u, i, u, i, n, n0) + 1), A.push_back(-1);
      glp_set_row_bnds(LP, row, GLP_FX, 0.0, 0.0), row++;
    }
  }
  // Symmetry: w_{(u,i),(v,j)} = w_{(v,j),(u,i)} for i < j.
  glp_add_rows(LP, pow(n - n0, 4.0));
  for (int u = n0; u < n; u++) {
    for (int i = n0; i < n; i++) {
      for (int v = n0; v < n; v++) {
        for (int j = i + 1; j < n; j++) {
          glp_set_row_name(LP, row, LP_name("S", {u, i, v, j}).c_str());
          X.push_back(row), Y.push_back(LP_get_variable_index(u, i, v, j, n, n0) + 1);
          X.push_back(row), Y.push_back(LP_get_variable_index(v, j, u, i, n, n0) + 1);
          A.push_back(1), A.push_back(-1);
          glp_set_row_bnds(LP, row, GLP_FX, 0.0, 0.0), row++;
        }
      }
    }
  }
  // y-density: sum_i y_{u,i} = s for every u.
  glp_add_rows(LP, n - n0);
  for (int u = n0; u < n; u++) {
    glp_set_row_name(LP, row, LP_name("yD", {u}).c_str());
    for (int i = n0; i < n; i++) {
      X.push_back(row), Y.push_back(LP_get_variable_index(u, i, n, n0) + 1), A.push_back(1);
    }
    X.push_back(row), Y.push_back(s_index + 1), A.push_back(-1);
    glp_set_row_bnds(LP, row, GLP_FX, 0.0, 0.0), row++;
  }
  // w-density: sum_j w_{(u,i),(v,j)} = y_{u,i} for every (u, i, v).
  glp_add_rows(LP, pow(n - n0, 3.0));
  for (int u = n0; u < n; u++) {
    for (int i = n0; i < n; i++) {
      for (int v = n0; v < n; v++) {
        glp_set_row_name(LP, row, LP_name("wD", {u, i, v}).c_str());
        for (int j = n0; j < n; j++) {
          X.push_back(row), Y.push_back(LP_get_variable_index(u, i, v, j, n, n0) + 1);
          A.push_back(1);
        }
        X.push_back(row), Y.push_back(LP_get_variable_index(u, i, n, n0) + 1), A.push_back(-1);
        glp_set_row_bnds(LP, row, GLP_FX, 0.0, 0.0), row++;
      }
    }
  }
  // Density: total w mass over u != v, i < j fixed to 1.
  glp_add_rows(LP, 1);
  glp_set_row_name(LP, row, LP_name("D", {}).c_str());
  for (int u = n0; u < n; u++) {
    for (int i = n0; i < n; i++) {
      for (int v = n0; v < n; v++) {
        for (int j = i + 1; j < n; j++) {
          if (u != v) {
            X.push_back(row), Y.push_back(LP_get_variable_index(u, i, v, j, n, n0) + 1);
            A.push_back(1);
          }
        }
      }
    }
  }
  glp_set_row_bnds(LP, row, GLP_FX, 1.0, 1.0), row++;
  // GLPK expects 1-based arrays, hence the &X[0] - 1 shift (GLPK idiom).
  glp_load_matrix(LP, A.size(), &X[0] - 1, &Y[0] - 1, &A[0] - 1);
  glp_term_out(0);  // silence GLPK terminal output
  glp_simplex(LP, NULL);
  double objective = glp_get_obj_val(LP);
  std::map<std::pair<int, int>, double> solution;
  glp_delete_prob(LP);
  glp_free_env();
  return std::make_tuple(objective, solution);
}
// Integer-programming version of the ordering bound: binary y_ij variables,
// antisymmetry and transitivity rows with unit upper bounds, and a density
// row.  Solves the LP relaxation first, then glp_intopt; the objective is
// normalized by the (integer) density.
std::tuple<double, std::map<std::pair<int, int>, double>> IP_ordering_solve(
    const std::map<std::pair<int, int>, long double> &p_uv, const int &n, const int &n0,
    const double &epsilon, const bool get_solution = false) {
  glp_prob *IP = glp_create_prob();
  glp_set_prob_name(IP, ("Solve " + std::to_string(epsilon)).c_str());
  glp_set_obj_dir(IP, GLP_MAX);
  int density = epsilon * (n - n0) * (n - n0 - 1) / 2;  // truncated to int
  // Objective function: one binary column per ordered pair (i, j).
  glp_add_cols(IP, (n - n0) * (n - n0));
  for (int i = n0; i < n; i++) {
    for (int j = n0; j < n; j++) {
      auto index = LP_get_variable_index(i, j, n, n0);
      glp_set_col_name(IP, index + 1, LP_name("y", {i, j}).c_str());
      if (i != j) {
        const auto &p_ij = p_uv.find(std::make_pair(i, j));
        glp_set_col_kind(IP, index + 1, GLP_BV);  // binary variable
        glp_set_obj_coef(
            IP, index + 1, p_ij != p_uv.end() ? static_cast<double>(p_ij->second) : 0.0);
      }
    }
  }
  // Constraint matrix accumulated as triplets (X = row, Y = col, A = val).
  std::vector<int> X, Y;
  std::vector<double> A;
  int row = 1;
  glp_add_rows(IP, (n - n0) * (n - n0 - 1) / 2);
  // Antisymmetry: y_ij + y_ji <= 1.
  // NOTE(review): the body is one big critical section, so this parallel
  // for is effectively serialized; row assignment order is nondeterministic
  // but internally consistent.
  #pragma omp parallel for
  for (int i = n0; i < n; i++) {
    for (int j = i + 1; j < n; j++) {
      #pragma omp critical
      {
        glp_set_row_name(IP, row, LP_name("A", {i, j}).c_str());
        X.push_back(row), Y.push_back(LP_get_variable_index(i, j, n, n0) + 1), A.push_back(1);
        X.push_back(row), Y.push_back(LP_get_variable_index(j, i, n, n0) + 1), A.push_back(1);
        glp_set_row_bnds(IP, row, GLP_UP, 1, 1), row++;
      }
    }
  }
  // Transitivity: all constraints if they fit under MAX_CONSTRAINTS,
  // otherwise a random sample of MAX_CONSTRAINTS of them.
  if (MAX_CONSTRAINTS >= pow(n - n0, 3.0)) {
    #pragma omp parallel for
    for (int i = n0; i < n; i++) {
      for (int j = n0; j < n; j++) {
        for (int k = n0; k < n; k++) {
          if (i != j && j != k && i != k) {
            #pragma omp critical
            {
              add_transitivity_constraint(IP, X, Y, A, row, n, n0, i, j, k);
              glp_set_row_bnds(IP, row, GLP_UP, 1, 1), row++;
            }
          }
        }
      }
    }
  } else {
    std::random_device device;
    std::mt19937 generator(device());
    std::uniform_int_distribution<int> index_distribution(n0, n - 1);
    // NOTE(review): `generator` is shared and used outside the critical
    // section — data race on the RNG state (same pattern as
    // LP_ordering_solve); confirm intent.
    #pragma omp parallel for
    for (int64_t constraint = 0; constraint < MAX_CONSTRAINTS; constraint++) {
      int i = index_distribution(generator), j = index_distribution(generator),
          k = index_distribution(generator);
      if (i == j || j == k || i == k) {
        continue;
      }
      #pragma omp critical
      {
        add_transitivity_constraint(IP, X, Y, A, row, n, n0, i, j, k);
        glp_set_row_bnds(IP, row, GLP_UP, 1, 1), row++;
      }
    }
  }
  // Density
  add_density_constraint(IP, X, Y, A, row, density, n, n0);
  // GLPK expects 1-based arrays, hence the &X[0] - 1 shift (GLPK idiom).
  glp_load_matrix(IP, A.size(), &X[0] - 1, &Y[0] - 1, &A[0] - 1);
  glp_term_out(0);  // silence GLPK terminal output
  glp_simplex(IP, NULL);   // LP relaxation as a warm start
  glp_intopt(IP, NULL);    // then branch-and-cut for the integer optimum
  double objective = glp_mip_obj_val(IP) / density;
  std::map<std::pair<int, int>, double> solution;
  if (get_solution) {
    solution = retrieve_solution(IP, n, n0, 1);
  }
  glp_delete_prob(IP);
  glp_free_env();
  return std::make_tuple(objective, solution);
}
|
GB_unaryop__lnot_int64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_int16
// op(A') function: GB_tran__lnot_int64_int16
// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !((int64_t) Ax [p] != 0) for all p, in parallel; the cast and
// operator are supplied by the GB_CAST_OP macro defined above.
GrB_Info GB_unop__lnot_int64_int16
(
    int64_t *Cx,        // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination has been compiled out
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot ((int64_t) A'): transpose, typecast int16 -> int64, and apply
// the logical-not operator.  The kernel body comes from the included
// transpose template (phase 2 only).
GrB_Info GB_tran__lnot_int64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type combination has been compiled out
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cones.c | #include "cones.h"
#include "linalg.h"
#include "scs.h"
#include "scs_blas.h" /* contains BLAS(X) macros and type info */
#include "util.h"
#define CONE_TOL (1e-9)
#define CONE_THRESH (1e-8)
#define EXP_CONE_MAX_ITERS (100)
#define BOX_CONE_MAX_ITERS (25)
#define POW_CONE_MAX_ITERS (20)
/* Box cone limits (+ or -) taken to be INF */
#define MAX_BOX_VAL (1e15)
#ifdef USE_LAPACK
#ifdef __cplusplus
extern "C" {
#endif
void BLAS(syev)(const char *jobz, const char *uplo, blas_int *n, scs_float *a,
blas_int *lda, scs_float *w, scs_float *work, blas_int *lwork,
blas_int *info);
blas_int BLAS(syrk)(const char *uplo, const char *trans, const blas_int *n,
const blas_int *k, const scs_float *alpha,
const scs_float *a, const blas_int *lda,
const scs_float *beta, scs_float *c, const blas_int *ldc);
void BLAS(scal)(const blas_int *n, const scs_float *sa, scs_float *sx,
const blas_int *incx);
#ifdef __cplusplus
}
#endif
#endif
/* Free a cone description and every array it owns; NULL is a no-op. */
void SCS(free_cone)(ScsCone *k) {
  if (!k) {
    return;
  }
  if (k->bu) {
    scs_free(k->bu);
  }
  if (k->bl) {
    scs_free(k->bl);
  }
  if (k->q) {
    scs_free(k->q);
  }
  if (k->s) {
    scs_free(k->s);
  }
  if (k->p) {
    scs_free(k->p);
  }
  scs_free(k);
}
/* Deep-copy a cone description: copy the struct, then duplicate every array
 * it points to so that dest owns independent storage.
 * NOTE(review): allocation results are unchecked; if scs_calloc fails the
 * following memcpy dereferences NULL — confirm the project's OOM policy. */
void SCS(deep_copy_cone)(ScsCone *dest, const ScsCone *src) {
  memcpy(dest, src, sizeof(ScsCone));
  /* copy bu, bl (the box cone arrays have bsize - 1 entries) */
  if (src->bsize > 1) {
    dest->bu = (scs_float *)scs_calloc(src->bsize - 1, sizeof(scs_float));
    memcpy(dest->bu, src->bu, (src->bsize - 1) * sizeof(scs_float));
    dest->bl = (scs_float *)scs_calloc(src->bsize - 1, sizeof(scs_float));
    memcpy(dest->bl, src->bl, (src->bsize - 1) * sizeof(scs_float));
  } else {
    dest->bu = SCS_NULL;
    dest->bl = SCS_NULL;
  }
  /* copy SOC */
  if (src->qsize > 0) {
    dest->q = (scs_int *)scs_calloc(src->qsize, sizeof(scs_int));
    memcpy(dest->q, src->q, src->qsize * sizeof(scs_int));
  } else {
    dest->q = SCS_NULL;
  }
  /* copy PSD cone */
  if (src->ssize > 0) {
    dest->s = (scs_int *)scs_calloc(src->ssize, sizeof(scs_int));
    memcpy(dest->s, src->s, src->ssize * sizeof(scs_int));
  } else {
    dest->s = SCS_NULL;
  }
  /* copy power cone */
  if (src->psize > 0) {
    dest->p = (scs_float *)scs_calloc(src->psize, sizeof(scs_float));
    memcpy(dest->p, src->p, src->psize * sizeof(scs_float));
  } else {
    dest->p = SCS_NULL;
  }
}
/* set the vector of rho y terms, based on scale and cones */
/* Fill r_y (length c->m) with the per-row rho_y weights, based on the scale
 * and the cone layout: the leading z entries get a much smaller weight. */
void SCS(set_r_y)(const ScsConeWork *c, scs_float scale, scs_float *r_y) {
  scs_int i;
  /* z cone */
  for (i = 0; i < c->k->z; ++i) {
    /* set rho_y small for z, similar to rho_x term, since z corresponds to
     * dual free cone, this effectively decreases penalty on those entries
     * and lets them be determined almost entirely by the linear system solve
     */
    r_y[i] = 1.0 / (1000. * scale);
  }
  /* others: plain 1 / scale */
  for (i = c->k->z; i < c->m; ++i) {
    r_y[i] = 1.0 / scale;
  }
}
/* the function f aggregates the entries within each cone */
/* For each cone slice of vec (delimited by c->cone_boundaries), replace
 * every entry of the slice with the aggregate f(slice, len).  The first
 * boundary entry is the offset where the per-cone slices begin. */
void SCS(enforce_cone_boundaries)(const ScsConeWork *c, scs_float *vec,
                                  scs_float (*f)(const scs_float *, scs_int)) {
  scs_int i, j, len, start;
  scs_float agg;
  start = c->cone_boundaries[0];
  for (i = 1; i < c->cone_boundaries_len; ++i) {
    len = c->cone_boundaries[i];
    agg = f(&(vec[start]), len);
    for (j = 0; j < len; ++j) {
      vec[start + j] = agg;
    }
    start += len;
  }
}
/* Number of entries in the packed lower triangle of an n x n symmetric
 * matrix, i.e. the vectorized size of an order-n SD cone. */
static inline scs_int get_sd_cone_size(scs_int n) {
  return n * (n + 1) / 2;
}
/*
* boundaries will contain array of indices of rows of A corresponding to
* cone boundaries, boundaries[0] is starting index for cones of size strictly
* larger than 1, boundaries malloc-ed here so should be freed.
*/
/* Build the cone-boundary array used by enforce_cone_boundaries: b[0] is the
 * total size of the independently-scaled cones (z + l + box), followed by
 * one entry per SOC, per SD cone, per exp cone (3) and per power cone (3).
 * The array is allocated here and owned by c (freed in finish_cone).
 * NOTE(review): the scs_calloc result is not checked before use. */
void set_cone_boundaries(const ScsCone *k, ScsConeWork *c) {
  scs_int i, s_cone_sz, count = 0;
  scs_int cone_boundaries_len =
      1 + k->qsize + k->ssize + k->ed + k->ep + k->psize;
  scs_int *b = (scs_int *)scs_calloc(cone_boundaries_len, sizeof(scs_int));
  /* cones that can be scaled independently */
  b[count] = k->z + k->l + k->bsize;
  count += 1; /* started at 0 now move to first entry */
  for (i = 0; i < k->qsize; ++i) {
    b[count + i] = k->q[i];
  }
  count += k->qsize;
  for (i = 0; i < k->ssize; ++i) {
    s_cone_sz = get_sd_cone_size(k->s[i]);
    b[count + i] = s_cone_sz;
  }
  count += k->ssize; /* add ssize here not ssize * (ssize + 1) / 2 */
  /* exp cones: each occupies 3 rows */
  for (i = 0; i < k->ep + k->ed; ++i) {
    b[count + i] = 3;
  }
  count += k->ep + k->ed;
  /* power cones: each occupies 3 rows */
  for (i = 0; i < k->psize; ++i) {
    b[count + i] = 3;
  }
  count += k->psize;
  /* other cones */
  c->cone_boundaries = b;
  c->cone_boundaries_len = cone_boundaries_len;
}
/* Total number of rows spanned by all cones: z + l + box, plus each SOC
 * length, each packed SD cone size, and 3 rows per exp / dual exp / power
 * cone.  (Loops run zero times when the corresponding size is zero, so no
 * explicit guards are needed.) */
static scs_int get_full_cone_dims(const ScsCone *k) {
  scs_int i, dims = k->z + k->l + k->bsize;
  for (i = 0; i < k->qsize; ++i) {
    dims += k->q[i];
  }
  for (i = 0; i < k->ssize; ++i) {
    dims += get_sd_cone_size(k->s[i]);
  }
  dims += 3 * k->ed;
  dims += 3 * k->ep;
  dims += 3 * k->psize;
  return dims;
}
/* Validate the cone specification against the data: total cone dimensions
 * must equal the number of rows in A (d->m), every size must be
 * non-negative, box bounds must be consistent, and power cone parameters
 * must lie in [-1, 1].  Returns 0 on success, -1 on any error.
 * BUG FIX: the "ep" / "ed" error messages were swapped — the k->ed check
 * printed "ep cone dimension error" and vice versa. */
scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k) {
  scs_int i;
  if (get_full_cone_dims(k) != d->m) {
    scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n",
               (long)get_full_cone_dims(k), (long)d->m);
    return -1;
  }
  if (k->z && k->z < 0) {
    scs_printf("free cone dimension error\n");
    return -1;
  }
  if (k->l && k->l < 0) {
    scs_printf("lp cone dimension error\n");
    return -1;
  }
  if (k->bsize) {
    if (k->bsize < 0) {
      scs_printf("box cone dimension error\n");
      return -1;
    }
    /* bu / bl hold bsize - 1 bound pairs; each lower <= upper */
    for (i = 0; i < k->bsize - 1; ++i) {
      if (k->bl[i] > k->bu[i]) {
        scs_printf("infeasible: box lower bound larger than upper bound\n");
        return -1;
      }
    }
  }
  if (k->qsize && k->q) {
    if (k->qsize < 0) {
      scs_printf("soc cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->qsize; ++i) {
      if (k->q[i] < 0) {
        scs_printf("soc cone dimension error\n");
        return -1;
      }
    }
  }
  if (k->ssize && k->s) {
    if (k->ssize < 0) {
      scs_printf("sd cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->ssize; ++i) {
      if (k->s[i] < 0) {
        scs_printf("sd cone dimension error\n");
        return -1;
      }
    }
  }
  if (k->ed && k->ed < 0) {
    scs_printf("ed cone dimension error\n");
    return -1;
  }
  if (k->ep && k->ep < 0) {
    scs_printf("ep cone dimension error\n");
    return -1;
  }
  if (k->psize && k->p) {
    if (k->psize < 0) {
      scs_printf("power cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->psize; ++i) {
      if (k->p[i] < -1 || k->p[i] > 1) {
        scs_printf("power cone error, values must be in [-1,1]\n");
        return -1;
      }
    }
  }
  return 0;
}
/* Free all workspace owned by the cone work structure, then the struct
 * itself.  Safe to call with c == NULL.
 * BUG FIX: previously c was dereferenced throughout and only checked for
 * NULL at the very end, so a NULL argument crashed before reaching the
 * guard; the check is now performed on entry. */
void SCS(finish_cone)(ScsConeWork *c) {
  if (!c) {
    return;
  }
#ifdef USE_LAPACK
  if (c->Xs) {
    scs_free(c->Xs);
  }
  if (c->Z) {
    scs_free(c->Z);
  }
  if (c->e) {
    scs_free(c->e);
  }
  if (c->work) {
    scs_free(c->work);
  }
#endif
  if (c->cone_boundaries) {
    scs_free(c->cone_boundaries);
  }
  if (c->s) {
    scs_free(c->s);
  }
  scs_free(c);
}
/* Build a human-readable summary of the cone sizes.  Returns a heap buffer
 * (512 bytes) allocated with scs_malloc; the caller owns it.
 * NOTE(review): the scs_malloc result is not checked before sprintf. */
char *SCS(get_cone_header)(const ScsCone *k) {
  char *tmp = (char *)scs_malloc(sizeof(char) * 512);
  scs_int i, soc_vars, sd_vars;
  sprintf(tmp, "cones: ");
  if (k->z) {
    sprintf(tmp + strlen(tmp), "\t z: primal zero / dual free vars: %li\n",
            (long)k->z);
  }
  if (k->l) {
    sprintf(tmp + strlen(tmp), "\t l: linear vars: %li\n", (long)k->l);
  }
  if (k->bsize) {
    sprintf(tmp + strlen(tmp), "\t b: box cone vars: %li\n", (long)(k->bsize));
  }
  soc_vars = 0;
  if (k->qsize && k->q) {
    /* total SOC vars is the sum of the individual cone lengths */
    for (i = 0; i < k->qsize; i++) {
      soc_vars += k->q[i];
    }
    sprintf(tmp + strlen(tmp), "\t q: soc vars: %li, qsize: %li\n",
            (long)soc_vars, (long)k->qsize);
  }
  sd_vars = 0;
  if (k->ssize && k->s) {
    /* each SD cone of order s contributes s*(s+1)/2 packed entries */
    for (i = 0; i < k->ssize; i++) {
      sd_vars += get_sd_cone_size(k->s[i]);
    }
    sprintf(tmp + strlen(tmp), "\t s: psd vars: %li, ssize: %li\n",
            (long)sd_vars, (long)k->ssize);
  }
  if (k->ep || k->ed) {
    sprintf(tmp + strlen(tmp), "\t e: exp vars: %li, dual exp vars: %li\n",
            (long)(3 * k->ep), (long)(3 * k->ed));
  }
  if (k->psize && k->p) {
    sprintf(tmp + strlen(tmp), "\t p: primal + dual power vars: %li\n",
            (long)(3 * k->psize));
  }
  return tmp;
}
/* One-dimensional Newton iteration used inside the exponential cone
 * projection: solves f(t) = 0 for t, clamping t to [-z_hat, infinity) /
 * non-negativity, and returns t + z_hat.  Stops on step or residual
 * tolerance (CONE_TOL) or after EXP_CONE_MAX_ITERS iterations. */
static scs_float exp_newton_one_d(scs_float rho, scs_float y_hat,
                                  scs_float z_hat, scs_float w) {
  /* start strictly positive and to the right of the singularity at t = 0 */
  scs_float t_prev, t = MAX(w - z_hat, MAX(-z_hat, 1e-9));
  scs_float f = 1., fp = 1.;
  scs_int i;
  for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
    t_prev = t;
    f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
    fp = (2 * t + z_hat) / rho / rho + 1 / t;
    t = t - f / fp;
    if (t <= -z_hat) {
      /* clamp: result t + z_hat would go non-positive */
      t = -z_hat;
      break;
    } else if (t <= 0) {
      t = 0;
      break;
    } else if (ABS(t - t_prev) < CONE_TOL) {
      break;
    } else if (SQRTF(f * f / fp) < CONE_TOL) {
      break;
    }
  }
  if (i == EXP_CONE_MAX_ITERS) {
    scs_printf("warning: exp cone newton step hit maximum %i iters\n", (int)i);
    scs_printf("rho=%1.5e; y_hat=%1.5e; z_hat=%1.5e; w=%1.5e; f=%1.5e, "
               "fp=%1.5e, t=%1.5e, t_prev= %1.5e\n",
               rho, y_hat, z_hat, w, f, fp, t, t_prev);
  }
  return t + z_hat;
}
/* Given the dual variable rho, recover the candidate projection x (length 3)
 * from the point v; x[2] comes from the 1-D Newton solve and the remaining
 * components follow in closed form (order matters: x[1] uses x[2]). */
static void exp_solve_for_x_with_rho(const scs_float *v, scs_float *x,
                                     scs_float rho, scs_float w) {
  x[2] = exp_newton_one_d(rho, v[1], v[2], w);
  x[1] = (x[2] - v[2]) * x[2] / rho;
  x[0] = v[0] - rho;
}
/* Gradient (with respect to the dual variable rho) of the bisection
 * objective: solves for x at this rho, then evaluates the stationarity
 * expression.  The log term is skipped when x[1] is (near) zero to avoid
 * log(0). */
static scs_float exp_calc_grad(const scs_float *v, scs_float *x, scs_float rho,
                               scs_float w) {
  exp_solve_for_x_with_rho(v, x, rho, w);
  if (x[1] <= 1e-12) {
    return x[0];
  }
  return x[0] + x[1] * log(x[1] / x[2]);
}
/* Bracket the root of the gradient in rho: doubling search starting from
 * ub = 0.125, with lb trailing one step behind, until the gradient at ub
 * becomes non-positive.  On return [lb, ub] brackets the root. */
static void exp_get_rho_ub(const scs_float *v, scs_float *x, scs_float *ub,
                           scs_float *lb) {
  *lb = 0;
  *ub = 0.125;
  while (exp_calc_grad(v, x, *ub, v[1]) > 0) {
    *lb = *ub;
    (*ub) *= 2;
  }
}
/* project onto the exponential cone, v has dimension *exactly* 3 */
/* project onto the exponential cone, v has dimension *exactly* 3 */
static scs_int proj_exp_cone(scs_float *v) {
  scs_int i;
  scs_float ub, lb, rho, g, x[3];
  scs_float r = v[0], s = v[1], t = v[2];
  /* v in cl(Kexp): already in the cone, projection is the identity */
  if ((s * exp(r / s) - t <= CONE_THRESH && s > 0) ||
      (r <= 0 && s == 0 && t >= 0)) {
    return 0;
  }
  /* -v in Kexp^*: projection is the origin */
  if ((r > 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) ||
      (r == 0 && s <= 0 && t <= 0)) {
    memset(v, 0, 3 * sizeof(scs_float));
    return 0;
  }
  /* special case with analytical solution */
  if (r < 0 && s < 0) {
    v[1] = 0.0;
    v[2] = MAX(v[2], 0);
    return 0;
  }
  /* iterative procedure to find projection, bisects on dual variable: */
  exp_get_rho_ub(v, x, &ub, &lb); /* get starting upper and lower bounds */
  for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
    rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
    g = exp_calc_grad(v, x, rho, x[1]); /* calculates gradient wrt dual var */
    if (g > 0) {
      lb = rho;
    } else {
      ub = rho;
    }
    if (ub - lb < CONE_TOL) {
      break;
    }
  }
#if VERBOSITY > 10
  scs_printf("exponential cone proj iters %i\n", (int)i);
#endif
  if (i == EXP_CONE_MAX_ITERS) {
    scs_printf("warning: exp cone outer step hit maximum %i iters\n", (int)i);
    scs_printf("r=%1.5e; s=%1.5e; t=%1.5e\n", r, s, t);
  }
  /* overwrite v with the projection found at the final rho */
  v[0] = x[0];
  v[1] = x[1];
  v[2] = x[2];
  return 0;
}
/* Allocate the LAPACK eigendecomposition workspace sized for the largest SD
 * cone (n_max x n_max), and query syev for the optimal lwork.  Returns 0 on
 * success, -1 on failure.  Without LAPACK, SD cones of order > 1 are a
 * fatal configuration error. */
static scs_int set_up_sd_cone_work_space(ScsConeWork *c, const ScsCone *k) {
  scs_int i;
#ifdef USE_LAPACK
  blas_int n_max = 0;
  blas_int neg_one = -1; /* lwork = -1 requests a workspace-size query */
  blas_int info = 0;
  scs_float wkopt = 0.0;
#if VERBOSITY > 0
#define _STR_EXPAND(tok) #tok
#define _STR(tok) _STR_EXPAND(tok)
  scs_printf("BLAS(func) = '%s'\n", _STR(BLAS(func)));
#endif
  /* eigenvector decomp workspace */
  for (i = 0; i < k->ssize; ++i) {
    if (k->s[i] > n_max) {
      n_max = (blas_int)k->s[i];
    }
  }
  c->Xs = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float));
  c->Z = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float));
  c->e = (scs_float *)scs_calloc(n_max, sizeof(scs_float));
  /* workspace query */
  /* NOTE(review): c->Xs is passed to syev before the NULL checks below; a
   * failed calloc would hand NULL to LAPACK here — presumably the query
   * ignores the matrix contents, but confirm. */
  BLAS(syev)
  ("Vectors", "Lower", &n_max, c->Xs, &n_max, SCS_NULL, &wkopt, &neg_one,
   &info);
  if (info != 0) {
    scs_printf("FATAL: syev failure, info = %li\n", (long)info);
    return -1;
  }
  c->lwork = (blas_int)(wkopt + 1); /* +1 for int casting safety */
  c->work = (scs_float *)scs_calloc(c->lwork, sizeof(scs_float));
  if (!c->Xs || !c->Z || !c->e || !c->work) {
    return -1;
  }
  return 0;
#else
  for (i = 0; i < k->ssize; i++) {
    if (k->s[i] > 1) {
      scs_printf(
          "FATAL: Cannot solve SDPs without linked blas+lapack libraries\n");
      scs_printf(
          "Install blas+lapack and re-compile SCS with blas+lapack library "
          "locations\n");
      return -1;
    }
  }
  return 0;
#endif
}
/* size of X is get_sd_cone_size(n) */
/* Project the compressed (lower-triangular, column-packed) symmetric
 * matrix X onto the positive semidefinite cone, in place, using the
 * workspaces in `c`. Returns 0 on success, negative on LAPACK failure. */
static scs_int proj_semi_definite_cone(scs_float *X, const scs_int n,
ScsConeWork *c) {
/* project onto the positive semi-definite cone */
#ifdef USE_LAPACK
scs_int i, first_idx;
blas_int nb = (blas_int)n;
blas_int ncols_z;
blas_int nb_plus_one = (blas_int)(n + 1); /* stride that walks the diagonal */
blas_int one_int = 1;
scs_float zero = 0., one = 1.;
scs_float sqrt2 = SQRTF(2.0);
scs_float sqrt2_inv = 1.0 / sqrt2;
scs_float *Xs = c->Xs;
scs_float *Z = c->Z;
scs_float *e = c->e;
scs_float *work = c->work;
blas_int lwork = c->lwork;
blas_int info = 0;
scs_float sq_eig_pos;
#endif
if (n == 0) {
return 0;
}
if (n == 1) {
/* 1x1 matrix: reduces to a scalar projection onto R_+ */
X[0] = MAX(X[0], 0.);
return 0;
}
#ifdef USE_LAPACK
/* copy lower triangular matrix into full matrix */
for (i = 0; i < n; ++i) {
memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]),
(n - i) * sizeof(scs_float));
}
/*
rescale so projection works, and matrix norm preserved
see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3
*/
/* scale diags by sqrt(2) */
BLAS(scal)(&nb, &sqrt2, Xs, &nb_plus_one); /* not n_squared */
/* Solve eigenproblem, reuse workspaces */
BLAS(syev)("Vectors", "Lower", &nb, Xs, &nb, e, work, &lwork, &info);
if (info != 0) {
scs_printf("WARN: LAPACK syev error, info = %i\n", (int)info);
if (info < 0) {
/* info < 0 means an invalid argument: unrecoverable */
return info;
}
}
first_idx = -1;
/* e is eigvals in ascending order, find first entry > 0 */
for (i = 0; i < n; ++i) {
if (e[i] > 0) {
first_idx = i;
break;
}
}
if (first_idx == -1) {
/* there are no positive eigenvalues, set X to 0 and return */
memset(X, 0, sizeof(scs_float) * get_sd_cone_size(n));
return 0;
}
/* Z is matrix of eigenvectors with positive eigenvalues */
memcpy(Z, &Xs[first_idx * n], sizeof(scs_float) * n * (n - first_idx));
/* scale Z by sqrt(eig) so that Z Z' reconstructs the projection */
for (i = first_idx; i < n; ++i) {
sq_eig_pos = SQRTF(e[i]);
BLAS(scal)(&nb, &sq_eig_pos, &Z[(i - first_idx) * n], &one_int);
}
/* Xs = Z Z' = V E V' */
ncols_z = (blas_int)(n - first_idx);
BLAS(syrk)("Lower", "NoTrans", &nb, &ncols_z, &one, Z, &nb, &zero, Xs, &nb);
/* undo rescaling: scale diags by 1/sqrt(2) */
BLAS(scal)(&nb, &sqrt2_inv, Xs, &nb_plus_one); /* not n_squared */
/* extract just lower triangular matrix */
for (i = 0; i < n; ++i) {
memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]),
(n - i) * sizeof(scs_float));
}
return 0;
#else
/* unreachable if set_up_sd_cone_work_space succeeded without LAPACK */
scs_printf("FAILURE: solving SDP but no blas/lapack libraries were found!\n");
scs_printf("SCS will return nonsense!\n");
SCS(scale_array)(X, NAN, n);
return -1;
#endif
}
/* Solve for the positive root x of x * (x - xh) = a * (rh - r) * r,
 * clamped below to avoid later division by zero in the gradient calcs. */
static scs_float pow_calc_x(scs_float r, scs_float xh, scs_float rh,
                            scs_float a) {
  scs_float disc = xh * xh + 4 * a * (rh - r) * r;
  scs_float root = 0.5 * (xh + SQRTF(disc));
  return MAX(root, 1e-12);
}
/* Derivative dx/dr of the root computed by pow_calc_x. */
static scs_float pow_calcdxdr(scs_float x, scs_float xh, scs_float rh,
                              scs_float r, scs_float a) {
  scs_float num = a * (rh - 2 * r);
  scs_float den = 2 * x - xh;
  return num / den;
}
/* Power-cone residual f = x^a * y^(1-a) - r. */
static scs_float pow_calc_f(scs_float x, scs_float y, scs_float r,
                            scs_float a) {
  scs_float weighted_gm = POWF(x, a) * POWF(y, (1 - a));
  return weighted_gm - r;
}
/* Derivative of pow_calc_f with respect to the dual variable r. */
static scs_float pow_calc_fp(scs_float x, scs_float y, scs_float dxdr,
                             scs_float dydr, scs_float a) {
  scs_float weighted_gm = POWF(x, a) * POWF(y, (1 - a));
  scs_float log_deriv = a * dxdr / x + (1 - a) * dydr / y;
  return weighted_gm * log_deriv - 1;
}
/*
* Routine to scale the limits of the box cone by the scaling diagonal mat D > 0
*
* want (t, s) \in K <==> (t', s') \in K'
*
* (t', s') = (d0 * t, D s) (overloading D to mean D[1:])
* (up to scalar scaling factor which we can ignore due to conic property)
*
* K = { (t, s) | t * l <= s <= t * u, t >= 0 } =>
* { (t, s) | d0 * t * D l / d0 <= D s <= d0 * t D u / d0, t >= 0 } =>
* { (t', s') | t' * l' <= s' <= t' u', t >= 0 } = K'
* where l' = D l / d0, u' = D u / d0.
*/
/* Rescale the box-cone limits by the equilibration diagonal D (see the
 * derivation above): l' = D l / d0, u' = D u / d0. Entries at or beyond
 * +/- MAX_BOX_VAL are sanitized to +/- infinity. Passing D == SCS_NULL
 * performs only the sanitizing. */
static void normalize_box_cone(ScsCone *k, scs_float *D, scs_int bsize) {
  scs_int j;
  for (j = 0; j < bsize - 1; j++) {
    /* upper limit: clip to +inf or rescale by D[j + 1] / D[0] */
    if (k->bu[j] >= MAX_BOX_VAL) {
      k->bu[j] = INFINITY;
    } else if (D) {
      k->bu[j] = D[j + 1] * k->bu[j] / D[0];
    }
    /* lower limit: clip to -inf or rescale by D[j + 1] / D[0] */
    if (k->bl[j] <= -MAX_BOX_VAL) {
      k->bl[j] = -INFINITY;
    } else if (D) {
      k->bl[j] = D[j + 1] * k->bl[j] / D[0];
    }
  }
}
/* Project onto { (t, s) | t * l <= s <= t * u, t >= 0 }, Newton's method on t
tx = [t; s], total length = bsize, under Euclidean metric 1/r_box.
Returns the optimal t (also written into tx[0]); t_warm_start seeds the
Newton iteration from the previous call's solution.
*/
static scs_float proj_box_cone(scs_float *tx, const scs_float *bl,
const scs_float *bu, scs_int bsize,
scs_float t_warm_start, scs_float *r_box) {
scs_float *x, gt, ht, t_prev, t = t_warm_start;
scs_float rho_t = 1, *rho = SCS_NULL, r;
scs_int iter, j;
if (bsize == 1) { /* special case */
/* no s block: reduces to projecting t onto the nonnegative ray */
tx[0] = MAX(tx[0], 0.0);
return tx[0];
}
x = &(tx[1]);
if (r_box) {
/* metric weights: rho_t for the t coordinate, rho[j] for s_j */
rho_t = 1.0 / r_box[0];
rho = &(r_box[1]);
}
/* should only require about 5 or so iterations, 1 or 2 if warm-started */
for (iter = 0; iter < BOX_CONE_MAX_ITERS; iter++) {
t_prev = t;
gt = rho_t * (t - tx[0]); /* gradient */
ht = rho_t; /* hessian */
/* accumulate contributions from coordinates clipped at a limit */
for (j = 0; j < bsize - 1; j++) {
r = rho ? 1.0 / rho[j] : 1.;
if (x[j] > t * bu[j]) {
gt += r * (t * bu[j] - x[j]) * bu[j]; /* gradient */
ht += r * bu[j] * bu[j]; /* hessian */
} else if (x[j] < t * bl[j]) {
gt += r * (t * bl[j] - x[j]) * bl[j]; /* gradient */
ht += r * bl[j] * bl[j]; /* hessian */
}
}
t = MAX(t - gt / MAX(ht, 1e-8), 0.); /* newton step */
#if VERBOSITY > 3
scs_printf("iter %i, t_new %1.3e, t_prev %1.3e, gt %1.3e, ht %1.3e\n", iter,
t, t_prev, gt, ht);
scs_printf("ABS(gt / (ht + 1e-6)) %.4e, ABS(t - t_prev) %.4e\n",
ABS(gt / (ht + 1e-6)), ABS(t - t_prev));
#endif
/* TODO: sometimes this check can fail (ie, declare convergence before it
* should) if ht is very large, which can happen with some pathological
* problems.
*/
if (ABS(gt / MAX(ht, 1e-6)) < 1e-12 * MAX(t, 1.) ||
ABS(t - t_prev) < 1e-11 * MAX(t, 1.)) {
break;
}
}
if (iter == BOX_CONE_MAX_ITERS) {
scs_printf("warning: box cone proj hit maximum %i iters\n", (int)iter);
}
/* with t fixed, clip each s_j into [t * l_j, t * u_j] */
for (j = 0; j < bsize - 1; j++) {
if (x[j] > t * bu[j]) {
x[j] = t * bu[j];
} else if (x[j] < t * bl[j]) {
x[j] = t * bl[j];
}
/* x[j] unchanged otherwise */
}
tx[0] = t;
#if VERBOSITY > 3
scs_printf("box cone iters %i\n", (int)iter + 1);
#endif
return t;
}
/* Euclidean projection onto the second-order cone
 * { (t, z) in R^q : ||z||_2 <= t }, performed in place on x. */
static void proj_soc(scs_float *x, scs_int q) {
  scs_float t, nrm;
  if (q == 0) {
    return;
  }
  if (q == 1) {
    /* 1-d SOC degenerates to the nonnegative orthant */
    x[0] = MAX(x[0], 0.);
    return;
  }
  t = x[0];
  nrm = SCS(norm_2)(&(x[1]), q - 1);
  if (nrm <= t) {
    /* already inside the cone */
    return;
  }
  if (nrm <= -t) {
    /* in the polar cone: projection is the origin */
    memset(&(x[0]), 0, q * sizeof(scs_float));
    return;
  }
  /* otherwise scale onto the cone surface */
  x[0] = (nrm + t) / 2.0;
  SCS(scale_array)(&(x[1]), x[0] / nrm, q - 1);
}
/* Project v (length 3) onto the 3-d power cone
 * K_a = { (x, y, r) : x^a * y^(1-a) >= |r|, x >= 0, y >= 0 },
 * in place, using a clamped Newton iteration on the dual variable r. */
static void proj_power_cone(scs_float *v, scs_float a) {
scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
scs_float x = 0.0, y = 0.0, r;
scs_int i;
/* v in K_a */
if (xh >= 0 && yh >= 0 &&
CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh) {
return; /* already inside the cone: projection is the identity */
}
/* -v in K_a^* */
if (xh <= 0 && yh <= 0 &&
CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >=
rh * POWF(a, a) * POWF(1 - a, 1 - a)) {
/* in the polar cone: projection is the origin */
v[0] = v[1] = v[2] = 0;
return;
}
r = rh / 2; /* initial guess, Newton iterates stay clamped to [0, rh] */
for (i = 0; i < POW_CONE_MAX_ITERS; ++i) {
scs_float f, fp, dxdr, dydr;
x = pow_calc_x(r, xh, rh, a);
y = pow_calc_x(r, yh, rh, 1 - a);
f = pow_calc_f(x, y, r, a);
if (ABS(f) < CONE_TOL) {
break; /* converged */
}
dxdr = pow_calcdxdr(x, xh, rh, r, a);
dydr = pow_calcdxdr(y, yh, rh, r, (1 - a));
fp = pow_calc_fp(x, y, dxdr, dydr, a);
/* Newton step, clamped to keep r within [0, rh] */
r = MAX(r - f / fp, 0);
r = MIN(r, rh);
}
v[0] = x;
v[1] = y;
v[2] = (v[2] < 0) ? -(r) : (r); /* restore the sign of the last entry */
}
/* project onto the primal K cone in the paper */
/* the r_y vector determines the INVERSE metric, ie, project under the
 * diag(r_y)^-1 norm.
 *
 * `x` is projected in place; `count` tracks the offset of the current
 * cone section within x. `normalize` is accepted for interface
 * compatibility but is not used in this routine. Returns 0 on success,
 * negative on failure (propagated from the SD cone projection).
 */
static scs_int proj_cone(scs_float *x, const ScsCone *k, ScsConeWork *c,
scs_int normalize, scs_float *r_y) {
scs_int i, status;
scs_int count = 0; /* running offset into x */
scs_float *r_box = SCS_NULL;
if (k->z) { /* doesn't use r_y */
/* project onto primal zero / dual free cone */
memset(x, 0, k->z * sizeof(scs_float));
count += k->z;
}
if (k->l) { /* doesn't use r_y */
/* project onto positive orthant */
for (i = count; i < count + k->l; ++i) {
x[i] = MAX(x[i], 0.0);
}
count += k->l;
}
if (k->bsize) { /* DOES use r_y */
if (r_y) {
r_box = &(r_y[count]); /* metric slice for the box cone section */
}
/* project onto box cone */
c->box_t_warm_start = proj_box_cone(&(x[count]), k->bl, k->bu, k->bsize,
c->box_t_warm_start, r_box);
count += k->bsize; /* since b = (t,s), len(s) = bsize - 1 */
}
if (k->qsize && k->q) { /* doesn't use r_y */
/* project onto second-order cones */
for (i = 0; i < k->qsize; ++i) {
proj_soc(&(x[count]), k->q[i]);
count += k->q[i];
}
}
if (k->ssize && k->s) { /* doesn't use r_y */
/* project onto PSD cones */
for (i = 0; i < k->ssize; ++i) {
status = proj_semi_definite_cone(&(x[count]), k->s[i], c);
if (status < 0) {
return status; /* propagate LAPACK failure */
}
count += get_sd_cone_size(k->s[i]);
}
}
if (k->ep) { /* doesn't use r_y */
/* primal exponential cones, each 3-dimensional, projected directly */
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < k->ep; ++i) {
proj_exp_cone(&(x[count + 3 * i]));
}
count += 3 * k->ep;
}
/* dual exponential cone */
if (k->ed) { /* doesn't use r_y */
/*
* exponential cone is not self dual, if s \in K
* then y \in K^* and so if K is the primal cone
* here we project onto K^*, via Moreau
* \Pi_C^*(y) = y + \Pi_C(-y)
*/
scs_int idx;
scs_float r, s, t;
SCS(scale_array)(&(x[count]), -1, 3 * k->ed); /* x = -x; */
#ifdef _OPENMP
#pragma omp parallel for private(r, s, t, idx)
#endif
for (i = 0; i < k->ed; ++i) {
idx = count + 3 * i;
r = x[idx];
s = x[idx + 1];
t = x[idx + 2];
proj_exp_cone(&(x[idx]));
/* subtracting the (already negated) input implements y + \Pi_C(-y) */
x[idx] -= r;
x[idx + 1] -= s;
x[idx + 2] -= t;
}
count += 3 * k->ed;
}
if (k->psize && k->p) { /* doesn't use r_y */
scs_float v[3];
scs_int idx;
/* don't use openmp for power cone
ifdef _OPENMP
pragma omp parallel for private(v, idx)
endif
*/
for (i = 0; i < k->psize; ++i) { /* doesn't use r_y */
idx = count + 3 * i;
if (k->p[i] >= 0) {
/* primal power cone */
proj_power_cone(&(x[idx]), k->p[i]);
} else {
/* dual power cone, using Moreau */
v[0] = -x[idx];
v[1] = -x[idx + 1];
v[2] = -x[idx + 2];
proj_power_cone(v, -k->p[i]);
x[idx] += v[0];
x[idx + 1] += v[1];
x[idx + 2] += v[2];
}
}
count += 3 * k->psize;
}
/* project onto OTHER cones */
return 0;
}
/* Allocate and initialize the cone workspace for cone `k` with total
 * cone dimension `m`. Returns SCS_NULL on any allocation failure
 * (previously unchecked: a failed scs_calloc was dereferenced). */
ScsConeWork *SCS(init_cone)(ScsCone *k, scs_int m) {
  ScsConeWork *c = (ScsConeWork *)scs_calloc(1, sizeof(ScsConeWork));
  if (!c) {
    return SCS_NULL;
  }
  c->k = k;
  c->m = m;
  c->scaled_cones = 0; /* box-cone scaling is deferred to first projection */
  set_cone_boundaries(k, c);
  c->s = (scs_float *)scs_calloc(m, sizeof(scs_float)); /* scratch copy of x */
  if (!c->s) {
    SCS(finish_cone)(c);
    return SCS_NULL;
  }
  if (k->ssize && k->s) {
    /* SD cones need the LAPACK eigendecomposition workspaces */
    if (set_up_sd_cone_work_space(c, k) < 0) {
      SCS(finish_cone)(c);
      return SCS_NULL;
    }
  }
  return c;
}
/* One-time normalization of the box-cone limits using the equilibration
 * diagonal (when provided); also resets the Newton warm start used by
 * the box-cone projection. No-op when no box cone is present. */
void scale_box_cone(ScsCone *k, ScsConeWork *c, ScsScaling *scal) {
  if (!k->bsize || !k->bu || !k->bl) {
    return; /* no box cone configured */
  }
  c->box_t_warm_start = 1.;
  if (scal) {
    /* also does some sanitizing */
    normalize_box_cone(k, &(scal->D[k->z + k->l]), k->bsize);
  }
}
/* Outward facing cone projection routine, performs projection in-place.
   If normalize > 0 then will use normalized (equilibrated) cones if applicable.
   Moreau decomposition for R-norm projections:
     `x + R^{-1} \Pi_{C^*}^{R^{-1}} ( - R x ) = \Pi_C^R ( x )`
   where \Pi^R_C is the projection onto C under the R-norm
   `||x||_R = \sqrt{x ' R x}`.
*/
scs_int SCS(proj_dual_cone)(scs_float *x, ScsConeWork *c, ScsScaling *scal,
                            scs_float *r_y) {
  scs_int status, i;
  ScsCone *k = c->k;

  if (!c->scaled_cones) {
    /* lazily normalize the box cone on the first call */
    scale_box_cone(k, c, scal);
    c->scaled_cones = 1;
  }

  /* stash the input: s = x */
  memcpy(c->s, x, c->m * sizeof(scs_float));

  /* negate and scale: x <- -R x */
  for (i = 0; i < c->m; ++i) {
    x[i] = r_y ? -r_y[i] * x[i] : -x[i];
  }

  /* x <- \Pi_{C^*}^{R^{-1}}(-Rx), projection under the r_y metric */
  status = proj_cone(x, k, c, scal ? 1 : 0, r_y);

  /* Moreau: result = s + R^{-1} * projection */
  for (i = 0; i < c->m; ++i) {
    x[i] = r_y ? x[i] / r_y[i] + c->s[i] : x[i] + c->s[i];
  }
  return status;
}
|
rotation_operation.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_ROTATION_OPERATION_H_INCLUDED )
#define KRATOS_ROTATION_OPERATION_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "processes/process.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "includes/table.h"
#include "includes/mesh.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// The Rotation Operation.
/** This Operation is a derived class from the process.h
*
*/
class RotationOperation : public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of Process
KRATOS_CLASS_POINTER_DEFINITION(RotationOperation);
///@}
///@name Life Cycle
///@{
/// Default constructor.
RotationOperation(ModelPart& model_part, DenseVector<int> group_ids,DenseVector<int> table_ids,unsigned int echo_level=0):
Process(),mr_model_part(model_part),mgroup_ids(group_ids),mtable_ids(table_ids)
{
mecho_level=echo_level;
}
/// Destructor.
~RotationOperation() override {}
///@}
///@name Operators
///@{
/// This operator is provided to call the process as a function and simply calls the Execute method.
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
/// Execute method is used to execute the Process algorithms.
/*
virtual void Execute()
{
}
/// this function is designed for being called at the beginning of the computations
/// right after reading the model and the groups
virtual void ExecuteInitialize()
{
}
/// this function is designed for being execute once before the solution loop but after all of the
/// solvers where built
virtual void ExecuteBeforeSolutionLoop()
{
}
*/
/// this function will be executed at every time step BEFORE performing the solve phase
void ExecuteInitializeSolutionStep() override
{
KRATOS_TRY
if ((mr_model_part.NumberOfTables())==0)
KRATOS_THROW_ERROR(std::logic_error, "Tables of the modelpart are empty", "");
if (mgroup_ids.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No groups to rotate", "");
if (mtable_ids.size()<6)
KRATOS_THROW_ERROR(std::logic_error, "Table's Vector too small!. Must be at least of size 6 for the 3 rotations + 3 reference(center) coordiantes", "");
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double time = CurrentProcessInfo[TIME];
//double delta_t = CurrentProcessInfo[DELTA_TIME];
//tables are used in the following way
//table(table_ids[0]) is the x displacement, table(table_ids[1]) is the y_displacement and table(table_ids[2]) is the z_displacement
//table0(time,displacement(time)
Table<double,double>& RotationTableA = mr_model_part.GetTable(mtable_ids[0]);
//KRATOS_WATCH(mtable_ids[0]);
Table<double,double>& RotationTableB = mr_model_part.GetTable(mtable_ids[1]);
//KRATOS_WATCH(mtable_ids[1]);
Table<double,double>& RotationTableC = mr_model_part.GetTable(mtable_ids[2]);
Table<double,double>& ReferenceTableX = mr_model_part.GetTable(mtable_ids[3]);
Table<double,double>& ReferenceTableY = mr_model_part.GetTable(mtable_ids[4]);
Table<double,double>& ReferenceTableZ = mr_model_part.GetTable(mtable_ids[5]);
//KRATOS_WATCH(mtable_ids[2]);
array_1d<double,3> rotation = ZeroVector(3); //the 3 angles of the rotation
rotation(0)=RotationTableA(time);
rotation(1)=RotationTableB(time);
rotation(2)=RotationTableC(time);
array_1d<double,3> reference_point = ZeroVector(3); //the reference center of coordinates for the rotation.
reference_point(0)=ReferenceTableX(time);
reference_point(1)=ReferenceTableY(time);
reference_point(2)=ReferenceTableZ(time);
BoundedMatrix<double, 3, 3 > rotation_matrix;
const double c1=cos(rotation(0));
const double c2=cos(rotation(1));
const double c3=cos(rotation(2));
const double s1=sin(rotation(0));
const double s2=sin(rotation(1));
const double s3=sin(rotation(2));
rotation_matrix(0,0)=c2*c3; rotation_matrix(0,1)=c1*s3+s1*s2*c3; rotation_matrix(0,2)=s1*s3-c1*s2*c3;
rotation_matrix(1,0)=-c2*s3; rotation_matrix(1,1)=c1*c3-s1*s2*s3; rotation_matrix(1,2)=s1*c3+c1*s2*s3;
rotation_matrix(2,0)=s2; rotation_matrix(2,1)=-s1*c2; rotation_matrix(2,2)=c1*c2;
for (unsigned int mesh_index=0;mesh_index<mgroup_ids.size();mesh_index++) //we loop around the desired groups
{
const int mesh_id=mgroup_ids[mesh_index];
ModelPart::MeshType& current_mesh = mr_model_part.GetMesh(mesh_id);
ModelPart::NodesContainerType::iterator inodebegin = current_mesh.NodesBegin();
//ModelPart::NodesContainerType::iterator inodeend = mgroup_container(mesh_id).NodesEnd();
#pragma omp parallel for
for(int ii=0; ii< static_cast<int>(current_mesh.Nodes().size()); ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
//pnode->Coordinates()=pnode->X0()+translation(0);
const array_1d<double,3> relative_position = pnode->GetInitialPosition().Coordinates() - reference_point;
const array_1d<double,3> new_position = prod(rotation_matrix,relative_position) + reference_point ;
if (pnode->SolutionStepsDataHas(DISPLACEMENT_X)) //
pnode->FastGetSolutionStepValue(DISPLACEMENT) = new_position - pnode->GetInitialPosition().Coordinates();
pnode->Coordinates() = new_position;
}
}
// = (i->FastGetSolutionStepValue(PRESS_PROJ_NO_RO));
KRATOS_CATCH("")
}
/*
/// this function will be executed at every time step AFTER performing the solve phase
virtual void ExecuteFinalizeSolutionStep()
{
}
/// this function will be executed at every time step BEFORE writing the output
virtual void ExecuteBeforeOutputStep()
{
}
/// this function will be executed at every time step AFTER writing the output
virtual void ExecuteAfterOutputStep()
{
}
/// this function is designed for being called at the end of the computations
/// right after reading the model and the groups
virtual void ExecuteFinalize()
{
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return "Process";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "Process";
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
}
*/
///@}
///@name Friends
///@{
///@}
private:
///@name Static Member Variables
///@{
ModelPart& mr_model_part;
// ModelPart::MeshType& mgroup_container;
DenseVector<int> mgroup_ids;
DenseVector<int> mtable_ids;
unsigned int mecho_level;
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
TranslationOperation& operator=(Process const& rOther);
/// Copy constructor.
//Process(Process const& rOther);
///@}
}; // Class Process
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/*
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
Process& rThis);
/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
const Process& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
* */
///@}
} // namespace Kratos.
#endif // KRATOS_ROTATION_OPERATION_H_INCLUDED defined
|
particle_levelset_utilities.h | /*
==============================================================================
KratosTestApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2010
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: rrossi $
// Date: $Date: 2007-03-06 10:30:31 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED )
#define KRATOS_PARTICLE_LEVELSET_UTILITIES_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
#include "pybind11/pybind11.h"
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "thermo_mechanical_application.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "processes/node_erase_process.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/timer.h"
// #include <boost/random/linear_congruential.hpp>
// #include <boost/random/uniform_int.hpp>
// #include <boost/random/uniform_real.hpp>
// #include <boost/random/variate_generator.hpp>
// #include <boost/generator_iterator.hpp>
// #include <tr1/random>
#include <time.h>
#ifdef _OPENMP
#include "omp.h"
#endif
namespace Kratos
{
template<std::size_t TDim> class ParticleLevelSetUtils
{
public:
KRATOS_CLASS_POINTER_DEFINITION(ParticleLevelSetUtils<TDim>);
//**********************************************************************************************
//**********************************************************************************************
//function to seed a list of new nodes
// Clears the Lagrangian model part and re-seeds level-set marker particles
// on the Eulerian mesh (within max_seed_distance of the interface, via the
// CreateParticles helpers), then copies each particle's velocity into the
// previous time step so the first convection step has consistent history.
void Seed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, const double max_seed_distance, const double min_edge_size)
{
KRATOS_TRY;
rLagrangianModelPart.Nodes().clear();
// renumber elements consecutively (the seeding helpers rely on element ids)
unsigned int ele_id = 1;
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
el_it->SetId(ele_id);
ele_id++;
}
if(TDim==2){
// 16 candidate seed positions per triangle
BoundedMatrix<double, 16, 3 > pos;
BoundedMatrix<double, 16, 3 > N;
CreateParticles2D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size);
}
else
{
// BoundedMatrix<double, 56, 3 > pos;
// BoundedMatrix<double, 56, 4 > N;
// CreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size);
// reduced seeding: 10 candidate positions per tetrahedron
BoundedMatrix<double, 10, 3 > pos;
BoundedMatrix<double, 10, 4 > N;
FewCreateParticles3D(rEulerianModelPart,rLagrangianModelPart,pos,N,max_seed_distance,min_edge_size);
}
// initialize old-step velocity of the freshly created particles
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY);
// node_it->FastGetSolutionStepValue(DISTANCE, 1) = node_it->FastGetSolutionStepValue(DISTANCE);
}
KRATOS_CATCH("");
}
//**********************************************************************************************
//**********************************************************************************************
// Convect every Lagrangian particle along the Eulerian VELOCITY field for
// one time step dt, using adaptive explicit substeps (3..20, sized from the
// local velocity magnitude and mesh size NODAL_H). Particles whose host
// element cannot be located are left flagged TO_ERASE for later removal.
void StreamlineMove(const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
array_1d<double, 3 > veulerian;
Vector N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
//KRATOS_WATCH("551")
#pragma omp parallel for firstprivate(results,N,veulerian)
for (int i = 0; i < nparticles; i++)
{
unsigned int substep = 0;
unsigned int subdivisions = 1; // re-computed after the first substep
double small_dt = dt;
while(substep++ < subdivisions)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
// provisionally mark for deletion; cleared below if the particle is located
(iparticle)->Set(TO_ERASE, true);
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
// KRATOS_WATCH("561")
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
// KRATOS_WATCH("564")
if (is_found == true)
{
(pparticle)->GetValue(IS_VISITED) = 1;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
// interpolate the Eulerian velocity at the particle position
noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY);
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY);
//compute adaptive subdivisions
if(substep == 1)
{
//compute h
double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H);
for (unsigned int k = 1; k < geom.size(); k++)
h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H);
//compute number of subdivisions needed
const unsigned int min_subdivisions = 3;
const unsigned int max_subdivisions = 20;
double v = norm_2(veulerian);
subdivisions = double(floor(2*dt*v/h));
subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? max_subdivisions : subdivisions;
//compute subdivisions time step
small_dt = dt / subdivisions;
// KRATOS_WATCH(subdivisions)
}
//move according to the streamline
array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT);
noalias(disp) += small_dt*veulerian;
(pparticle)->Set(TO_ERASE, false);
// KRATOS_WATCH("585")
//update position from initial position + accumulated displacement
noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition();
noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT);
(iparticle)->GetValue(IS_VISITED) = 0;
//KRATOS_WATCH("619")
}
}
}
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
// Particle level-set correction of the Eulerian DISTANCE field: every
// "escaped" particle (whose sign IS_WATER disagrees with the interpolated
// level set by more than its PARTICLE_RADIUS) rebuilds the distance of its
// host element's nodes from its own signed sphere; per node, the corrected
// value of smallest magnitude wins.
void ParticleLevelSetCorrection(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
    KRATOS_TRY

    //Initialize NAGATIVE_DISTANCE & POSETIVE_DISTANCE with the current distance
    const int nnodes= rEulerianModelPart.Nodes().size();

    #pragma omp parallel for
    for (int jj = 0; jj < nnodes; jj++)
    {
        ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj;
        const double nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE);
        node_itr->SetValue(POSETIVE_DISTANCE,nd_dist );
        node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist );
    }

    //loop over particles
    double particle_dist= 0.0;
    Vector N;
    const int max_results = 10000;
    typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
    const int nparticles = rLagrangianModelPart.Nodes().size();

    #pragma omp parallel for firstprivate(results,N,particle_dist)
    for (int i = 0; i < nparticles; i++)
    {
        ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i;
        Node < 3 > ::Pointer p_pointer = *(particle_itr.base());
        typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
        Element::Pointer pelement;
        bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results);
        if (is_found == true)
        {
            Geometry< Node < 3 > >& geom = pelement->GetGeometry();
            //interpolate the particle distance
            particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE);
            for (unsigned int k = 1; k < geom.size(); k++)
                particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE);

            //check if correction is needed: the particle has escaped when the
            //interpolated distance has the wrong sign and exceeds its radius
            const double p_sign = particle_itr->FastGetSolutionStepValue(IS_WATER);
            const double p_radi = particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS);

            if( particle_dist*p_sign < 0.0 && fabs(particle_dist) > p_radi)
            {
                // BUGFIX(review): the previous revision subtracted node
                // coordinates cumulatively from a single running position, so
                // every node after the first used a corrupted distance; compute
                // each node's distance from the particle afresh instead.
                // TODO confirm whether node 0 should also be corrected (the
                // loop historically started at kk = 1).
                for (unsigned int kk = 1; kk < geom.size(); kk++){
                    const double dx = particle_itr->X() - geom[kk].X();
                    const double dy = particle_itr->Y() - geom[kk].Y();
                    const double dz = particle_itr->Z() - geom[kk].Z();
                    double dd = dx*dx + dy*dy + dz*dz;
                    dd = sqrt(dd);
                    //signed distance of the node to the particle's sphere
                    double dist_to_particle = p_sign * (p_radi - dd);

                    //correction due to particle distance and sign
                    geom[kk].SetLock();
                    if( p_sign == 1.0){
                        double& pos_distance = geom[kk].GetValue(POSETIVE_DISTANCE);
                        if ( dist_to_particle > pos_distance)
                            pos_distance = dist_to_particle;}
                    else if( p_sign == -1.0){
                        double& neg_distance = geom[kk].GetValue(NAGATIVE_DISTANCE);
                        if ( dist_to_particle < neg_distance)
                            neg_distance = dist_to_particle; }
                    geom[kk].UnSetLock();
                }
            }
        }
    }//end of loop over particles

    //final correction, choose between NAGATIVE_DISTANCE & POSETIVE_DISTANCE
    #pragma omp parallel for
    for (int jj = 0; jj < nnodes; jj++)
    {
        ModelPart::NodesContainerType::iterator node_itr = rEulerianModelPart.NodesBegin() + jj;
        double posetive = node_itr->GetValue(POSETIVE_DISTANCE);
        double negative = node_itr->GetValue(NAGATIVE_DISTANCE);
        double & nd_dist = node_itr->FastGetSolutionStepValue(DISTANCE);
        if ( posetive != negative){
            //keep whichever corrected value is closer to the interface
            if( fabs(posetive) < fabs(negative) )
                nd_dist = posetive;
            else
                nd_dist = negative;
            node_itr->SetValue(POSETIVE_DISTANCE,nd_dist );
            node_itr->SetValue(NAGATIVE_DISTANCE,nd_dist );
        }
    }
    KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
void ResetParticleRadius(const double min_edge_length, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY;
double particle_dist = 0.0;
Vector N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N,particle_dist)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator particle_itr = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer p_pointer = *(particle_itr.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(p_pointer->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
//interpolate the particle distance
particle_dist = N[0] * geom[0].FastGetSolutionStepValue(DISTANCE);
for (unsigned int k = 1; k < geom.size(); k++)
particle_dist += N[k] * geom[k].FastGetSolutionStepValue(DISTANCE);
if( fabs(particle_dist) < 0.1*min_edge_length)
particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.1*min_edge_length;
else if(fabs(particle_dist) > 0.5*min_edge_length)
particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = 0.5*min_edge_length;
else
particle_itr->FastGetSolutionStepValue(PARTICLE_RADIUS) = fabs(particle_dist);
}
}
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
// Rebalances the Lagrangian particle cloud around the interface:
//  1) counts the particles hosted by each near-interface element
//     (the count is stashed in the element's YOUNG_MODULUS variable and the
//     host element id in the particle's NL_ITERATION_NUMBER variable),
//  2) flags "escaped" particles (sign opposite to the local level set),
//  3) marks far-away, non-escaped particles for deletion,
//  4) reseeds under-populated / thins over-populated elements (2D or 3D path),
//  5) executes the pending node erases.
void ParticleReseeding(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator, const double max_seed_distance, const double min_edge_size)
{
KRATOS_TRY;
//generate a tree with the position of the lagrangian nodes
// typedef Node < 3 > PointType;
// typedef Node < 3 > ::Pointer PointTypePointer;
//unsigned int min_number_of_particles = 1;
// YOUNG_MODULUS is reused as a per-element particle counter; reset it.
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
el_it->SetValue(YOUNG_MODULUS,0.0);
}
// Reset per-particle bookkeeping flags before the counting pass.
for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin();
pparticle != rLagrangianModelPart.NodesEnd(); pparticle++)
{
pparticle->Set(TO_ERASE,false);
pparticle->SetValue(NL_ITERATION_NUMBER,(rEulerianModelPart.ElementsBegin())->Id());
pparticle->SetValue(IS_ESCAPED,false);
pparticle->SetValue(IS_VISITED,0);
}
//count particles that fall within an element
Vector N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
//count particles within an element
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
const double particle_sign = iparticle->FastGetSolutionStepValue(IS_WATER);
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
bool is_scaped = CheckIfEscaped(geom,N,particle_sign);
iparticle->SetValue(IS_ESCAPED,is_scaped);
if( CheckElemDist(geom,max_seed_distance) )// if it is inside the 3h band
{
// Atomically bump the host element's particle counter and remember
// the host element id on the particle.
double& counter = pelement->GetValue(YOUNG_MODULUS);
#pragma omp atomic
counter += 1.0;
iparticle->SetValue(NL_ITERATION_NUMBER , pelement->Id());
}
else
{
if( is_scaped == false) //delete if it is not an escaped particle
iparticle->Set(TO_ERASE,true);
}
}
}
// Reseed under-populated / thin over-populated near-surface elements.
if(TDim==2){
ReseedOrDelete2D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size);
}
else
{
// Target particle counts per 3D element (see commented alternatives).
const int max_num_ptr = 16;//70;
const int num_ptr = 10;//56;
const int min_num_ptr = 6;//40;
MarkEraseExtraParticles3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, max_num_ptr, num_ptr);
ReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr );
FewReseedPoorElements3D(rEulerianModelPart, rLagrangianModelPart, max_seed_distance, min_edge_size, min_num_ptr, num_ptr );
}
//perform the erase
NodeEraseProcess(rLagrangianModelPart).Execute();
KRATOS_CATCH("");
}
//**********************************************************************************************
//**********************************************************************************************
// Builds a combined model part for post-processing: it takes the Eulerian
// elements and nodes as-is and appends all Lagrangian particle nodes,
// renumbering the particles with ids that continue after the highest
// Eulerian node id. NOTE: the Lagrangian nodes are renumbered in place.
void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY;
rCompleteModelPart.Elements() = rEulerianModelPart.Elements();
rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes();
unsigned int id;
// First free id after the Eulerian nodes (containers are id-sorted).
if(rEulerianModelPart.Nodes().size()!= 0)
id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
else
id = 1;
//preallocate the memory needed
int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size();
rCompleteModelPart.Nodes().reserve( tot_nodes );
//note that here we renumber the nodes
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
rCompleteModelPart.AddNode(*(node_it.base()));
node_it->SetId(id++);
}
KRATOS_CATCH("");
}
//**********************************************************************************
//**********************************************************************************
// Finds the longest and the shortest element edge over the whole mesh and
// appends them to "maxmin" in that order (max first, then min).
// Squared lengths are compared inside the loops; one sqrt per element.
void FindMaxMinEdgeSize(ModelPart& r_model_part, pybind11::list& maxmin)
{
    KRATOS_TRY

    double max_edge = 0.0;
    double min_edge = 1000.0;   // sentinel: assumes every edge is shorter than 1000

    for(ModelPart::ElementsContainerType::iterator it=r_model_part.ElementsBegin(); it!=r_model_part.ElementsEnd(); it++)
    {
        Geometry<Node<3> >&geom = it->GetGeometry();

        double loc_h_max = 0.0;
        double loc_h_min = 1000.0;

        // Visit every node pair (edge) of the element once.
        for(unsigned int i=0; i<TDim+1; i++)
        {
            double xc = geom[i].X();
            double yc = geom[i].Y();
            double zc = geom[i].Z();
            for(unsigned int j=i+1; j<TDim+1; j++)
            {
                double x = geom[j].X();
                double y = geom[j].Y();
                double z = geom[j].Z();
                double l = (x - xc)*(x - xc);
                l += (y - yc)*(y - yc);
                l += (z - zc)*(z - zc);

                // BUGFIX: these two tests must be independent. The previous
                // "else if" skipped the minimum test whenever the maximum was
                // updated, so elements whose edges were visited in increasing
                // length order never updated loc_h_min (it stayed at 1000).
                if (l > loc_h_max) loc_h_max = l;
                if (l < loc_h_min) loc_h_min = l;
            }
        }

        loc_h_max = sqrt(loc_h_max);
        loc_h_min = sqrt(loc_h_min);

        if(loc_h_max > max_edge ) max_edge = loc_h_max;
        if(loc_h_min < min_edge ) min_edge = loc_h_min;
    }

    // r_model_part.GetCommunicator().MaxAll(h_max);
    maxmin.append(max_edge);
    maxmin.append(min_edge);
    KRATOS_CATCH("");
}
private:
// Seeds 56 particles per near-interface tetrahedron (deterministic pattern
// from ComputeGaussPointPositions3D). Velocity and level-set distance are
// interpolated from the element nodes; the sign of the distance fixes the
// particle's IS_WATER flag and its radius. pos/N are passed by value and
// used as scratch, so the caller's matrices are not modified.
void CreateParticles3D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
BoundedMatrix<double, 56, 3 > pos,
BoundedMatrix<double, 56, 4 > N,
const double max_seed_distance,
const double min_edge_size)
{
// Continue ids after the highest Eulerian node id.
unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >& geom = el_it->GetGeometry();
if(CheckElemDist(geom,max_seed_distance))
{
ComputeGaussPointPositions3D(geom, pos, N);
for (unsigned int i = 0; i < pos.size1(); i++)
{
int node_id = id++;
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2));
array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
noalias(vel) = ZeroVector(3);
// double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE);
double p_distance = 0.0;
for (unsigned int j = 0; j < TDim + 1; j++){
noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY);
p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE);
}
// Assign particle sign
// NOTE(review): p_distance == 0.0 leaves IS_WATER at whatever value
// the fresh node carries — presumably the variable's zero default;
// confirm this is intended for particles exactly on the interface.
if(p_distance < 0.0)
pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
else if(p_distance > 0.0)
pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;
pnode->Fix(IS_WATER);
AssignParticleRadius(pnode,p_distance,min_edge_size);
}
}
}
}
// Sparse variant of CreateParticles3D: seeds only 10 particles per
// near-interface tetrahedron (pattern from FewComputeGaussPointPositions3D).
// pos/N are by-value scratch buffers; the caller's matrices stay unchanged.
void FewCreateParticles3D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
BoundedMatrix<double, 10, 3 > pos,
BoundedMatrix<double, 10, 4 > N,
const double max_seed_distance,
const double min_edge_size)
{
// Continue ids after the highest Eulerian node id.
unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >& geom = el_it->GetGeometry();
if(CheckElemDist(geom,max_seed_distance))
{
FewComputeGaussPointPositions3D(geom, pos, N);
for (unsigned int i = 0; i < pos.size1(); i++)
{
int node_id = id++;
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2));
array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
noalias(vel) = ZeroVector(3);
// double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE);
// Interpolate velocity and level-set distance at the seed position.
double p_distance = 0.0;
for (unsigned int j = 0; j < TDim + 1; j++){
noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY);
p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE);
}
// Assign particle sign
// NOTE(review): p_distance == 0.0 sets neither branch — verify the
// default IS_WATER value is acceptable for on-interface seeds.
if(p_distance < 0.0)
pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
else if(p_distance > 0.0)
pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;
pnode->Fix(IS_WATER);
AssignParticleRadius(pnode,p_distance,min_edge_size);
}
}
}
}
// 2D counterpart of CreateParticles3D: seeds 16 particles per near-interface
// triangle (pattern from ComputeGaussPointPositions2D), interpolating
// velocity and level-set distance and fixing the particle sign/radius.
// pos/N are by-value scratch buffers; the caller's matrices stay unchanged.
void CreateParticles2D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
BoundedMatrix<double, 16, 3 > pos,
BoundedMatrix<double, 16, 3 > N,
const double max_seed_distance,
const double min_edge_size)
{
// Continue ids after the highest Eulerian node id.
unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >& geom = el_it->GetGeometry();
if(CheckElemDist(geom,max_seed_distance))
{
ComputeGaussPointPositions2D(geom, pos, N);
for (unsigned int i = 0; i < pos.size1(); i++)
{
int node_id = id++;
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2));
array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
noalias(vel) = ZeroVector(3);
// double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE);
// Interpolate velocity and level-set distance at the seed position.
double p_distance = 0.0;
for (unsigned int j = 0; j < TDim + 1; j++){
noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY);
p_distance += N(i, j) * geom[j].FastGetSolutionStepValue(DISTANCE);
}
// Assign particle sign
// NOTE(review): p_distance == 0.0 sets neither branch — verify the
// default IS_WATER value is acceptable for on-interface seeds.
if(p_distance < 0.0)
pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
else if(p_distance > 0.0)
pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;
pnode->Fix(IS_WATER);
AssignParticleRadius(pnode,p_distance,min_edge_size);
}
}
}
}
// 2D reseed/thin-out pass over near-interface triangles.
// Elements inside the seeding band holding fewer than 12 particles are topped
// up to 16 (deterministic pattern from ComputeGaussPointPositions2D); elements
// holding more than 20 are thinned back down to 16, preferring to delete
// large-radius particles and never deleting "escaped" ones.
// Relies on bookkeeping done in ParticleReseeding: YOUNG_MODULUS holds the
// per-element particle count, NL_ITERATION_NUMBER the host element id.
void ReseedOrDelete2D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
const double max_seed_distance,
const double min_edge_size)
{
    int id;
    // BUGFIX: start one past the current maximum id. The previous code used
    // the last existing id itself, so the first node created below collided
    // with the id of the last existing particle (the sibling CreateParticles*
    // routines already use Id() + 1).
    if (rLagrangianModelPart.Nodes().size() != 0)
        id = (rLagrangianModelPart.NodesEnd() - 1)->Id() + 1;
    else
        id = 1;

    const int nelements = rEulerianModelPart.Elements().size();
    const int nparticles = rLagrangianModelPart.Nodes().size();

    BoundedMatrix<double, 16, 3 > coord;
    BoundedMatrix<double, 16, 3 > NN;
    // #pragma omp parallel for firstprivate(NN,coord)
    for (int ne = 0; ne < nelements; ne++)
    {
        ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne;
        Geometry<Node < 3 > >& geom = ielem->GetGeometry();
        int n_ptr = int(ielem->GetValue(YOUNG_MODULUS));

        if( n_ptr < 12 && CheckElemDist(geom,max_seed_distance) ) // reseed a poor element close to the surface
        {
            // Compute the deterministic seeding coordinates.
            //RandomPariclePosition(geom, coord, NN);
            ComputeGaussPointPositions2D(geom, coord, NN);
            int aux_n_ptr = n_ptr;
            int cnt = 0;
            while( aux_n_ptr<16 ){
                aux_n_ptr++;
                // Create the new particle at the next seed coordinate.
                int node_id = id++;
                Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2));

                array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
                noalias(vel) = ZeroVector(3);

                // Interpolate velocity and level-set distance at the seed.
                double p_distance = 0.0;
                for (unsigned int j = 0; j < TDim + 1; j++){
                    noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY);
                    p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE);
                }

                // Assign particle sign from the interpolated distance.
                if(p_distance < 0.0)
                    pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
                else if(p_distance > 0.0)
                    pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;

                pnode->Fix(IS_WATER);
                AssignParticleRadius(pnode,p_distance,min_edge_size);

                cnt++;
            }
        }
        else if( n_ptr > 20 && CheckElemDist(geom,max_seed_distance) ){
            // Over-populated element: collect its particles.
            const int ele_id = ielem->Id();
            ModelPart::NodesContainerType element_particles;
            element_particles.reserve(64);
            for (int kk = 0; kk < nparticles; kk++)
            {
                ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk;
                const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER);
                if( ptr_nest==ele_id )
                {
                    iparticle->SetValue(SCALE, 0);
                    element_particles.push_back( *(iparticle.base()) );
                }
            }

            // Rank particles by radius: after this O(n^2) pass, SCALE holds
            // how many peers each particle "beats" (larger radius -> higher SCALE).
            ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin();
            unsigned int ptr_elem_size = element_particles.size();
            for(unsigned int ii=0; ii < ptr_elem_size; ii++)
                for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++)
                {
                    double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS);
                    double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS);
                    (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1;
                }

            // Delete extra nodes, highest-ranked (largest radius) first,
            // skipping escaped particles, until 16 remain.
            int aux_ptr_elem_size = int(ptr_elem_size);
            while(aux_ptr_elem_size>16)
            {
                for(unsigned int ii=0; ii < ptr_elem_size; ii++){
                    bool swt = false;
                    for( int kkk = ptr_elem_size; kkk>0; kkk-- )
                        if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){
                            bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED);
                            if( is_escaped==false )
                                (ptr_begin + ii)->Set(TO_ERASE,true); // escaped nodes are preserved
                            (ptr_begin + ii)->SetValue(IS_VISITED,1);
                            swt = true;
                            break;
                        }
                    if(swt )
                        break;
                }
                aux_ptr_elem_size -= 1;
            }
        }
    }
}
// Marks surplus particles for deletion in 3D elements hosting more than
// max_num_particle particles, until only num_particle (non-escaped) remain.
// Particles with the largest PARTICLE_RADIUS are removed first (see
// RadiusCompare); escaped particles are never deleted.
// Relies on NL_ITERATION_NUMBER (host element id) and YOUNG_MODULUS
// (per-element particle count) set in ParticleReseeding.
void MarkEraseExtraParticles3D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
const double max_seed_distance,
const double min_edge_size,
const int max_num_particle,
const int num_particle)
{
    const int nelements = rEulerianModelPart.Elements().size();
    const int nparticles = rLagrangianModelPart.Nodes().size();

    std::vector< GlobalPointersVector< Node< 3> > > particle_of_element(nelements);
    std::vector< unsigned int > num_ptr_in_elem(nelements,0);

    // Fill the per-element particle counters. Both vectors MUST be shared
    // across threads: every iteration writes a distinct slot (element ids are
    // unique) and the data is read after the loop. BUGFIX: the previous
    // "firstprivate(num_ptr_in_elem)" clause made each thread fill a private
    // copy that was discarded, so the counters stayed zero and this routine
    // never marked anything for deletion.
    #pragma omp parallel for
    for (int ne = 0; ne < nelements; ne++)
    {
        ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne;
        int n_ptr = int(ielem->GetValue(YOUNG_MODULUS));
        unsigned int ele_id = ielem->Id();
        num_ptr_in_elem[ele_id-1] = n_ptr;
        if(n_ptr > max_num_particle)
            particle_of_element[ele_id-1].reserve(n_ptr);
    }

    // Gather pointers to the particles hosted by over-populated elements.
    for (int kk = 0; kk < nparticles; kk++)
    {
        ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk;
        const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER);
        if( num_ptr_in_elem[ptr_nest-1] > static_cast<unsigned int>(max_num_particle) )
            particle_of_element[ptr_nest-1].push_back( *(iparticle.base()) );
    }

    // Sort each over-populated element's particles by descending radius and
    // mark them TO_ERASE until num_particle remain. Shared access is safe:
    // iteration ii only touches particle_of_element[ii] (the previous
    // firstprivate clause copied the whole vector into every thread).
    #pragma omp parallel for
    for( int ii = 0; ii< static_cast<int>(particle_of_element.size()); ++ii)
    {
        if(particle_of_element[ii].size() > static_cast<unsigned int>(max_num_particle))
        {
            std::sort(particle_of_element[ii].ptr_begin(), particle_of_element[ii].ptr_end(), RadiusCompare() );

            GlobalPointersVector< Node< 3> >::iterator ele_pt_ptr = particle_of_element[ii].begin();
            const unsigned int this_ele_ptr = particle_of_element[ii].size();
            int aux_ptr_elem_size = this_ele_ptr;
            for( unsigned int ij = 0; (ij < this_ele_ptr && aux_ptr_elem_size > num_particle); ++ij)
            {
                bool is_escaped = (ele_pt_ptr + ij)->GetValue(IS_ESCAPED);
                if( is_escaped==false ){
                    (ele_pt_ptr + ij)->Set(TO_ERASE,true);
                    aux_ptr_elem_size--;
                }
            }
        }
    }
}
// Strict-weak-ordering functor for particle pointers: sorts by descending
// PARTICLE_RADIUS (largest radius first).
struct RadiusCompare{
template<class TPointerType>
bool operator()(const TPointerType lhs, const TPointerType rhs)
{
    return lhs.get()->FastGetSolutionStepValue(PARTICLE_RADIUS)
         > rhs.get()->FastGetSolutionStepValue(PARTICLE_RADIUS);
}
};
// Reseeds 3D near-interface elements that hold fewer than min_num_particle
// particles, topping each up to num_particle using the dense 56-point
// deterministic pattern (ComputeGaussPointPositions3D). Velocity and
// level-set distance are interpolated at each seed; the distance sign sets
// IS_WATER and the radius.
void ReseedPoorElements3D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
const double max_seed_distance,
const double min_edge_size,
const int min_num_particle,
const int num_particle)
{
    int id;
    // BUGFIX: start one past the current maximum id; the previous code
    // reused the last existing particle id for the first node created here
    // (the CreateParticles* routines already use Id() + 1).
    if (rLagrangianModelPart.Nodes().size() != 0)
        id = (rLagrangianModelPart.NodesEnd() - 1)->Id() + 1;
    else
        id = 1;

    const int nelements = rEulerianModelPart.Elements().size();

    BoundedMatrix<double, 56, 3 > coord;
    BoundedMatrix<double, 56, 4 > NN;
    for (int ne = 0; ne < nelements; ne++)
    {
        ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne;
        Geometry<Node < 3 > >& geom = ielem->GetGeometry();
        int n_ptr = int(ielem->GetValue(YOUNG_MODULUS));
        if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) ) // reseed a poor near-surface element
        {
            // Deterministic seeding coordinates and shape functions.
            //RandomPariclePosition(geom, coord, NN);
            ComputeGaussPointPositions3D(geom, coord, NN);
            int aux_n_ptr = n_ptr;
            int cnt = 0;
            while( aux_n_ptr < num_particle ){
                aux_n_ptr++;
                // Create the new particle at the next seed coordinate.
                int node_id = id++;
                Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2));

                array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
                noalias(vel) = ZeroVector(3);

                // Interpolate velocity and level-set distance at the seed.
                double p_distance = 0.0;
                for (unsigned int j = 0; j < TDim + 1; j++){
                    noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY);
                    p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE);
                }

                // Assign particle sign from the interpolated distance.
                if(p_distance < 0.0)
                    pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
                else if(p_distance > 0.0)
                    pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;

                pnode->Fix(IS_WATER);
                AssignParticleRadius(pnode,p_distance,min_edge_size);

                cnt++;
            }
        }
    }
}
// Sparse variant of ReseedPoorElements3D: uses the 10-point seeding pattern
// (FewComputeGaussPointPositions3D) to top poor near-interface elements up
// to num_particle particles.
void FewReseedPoorElements3D(ModelPart& rEulerianModelPart,
ModelPart& rLagrangianModelPart,
const double max_seed_distance,
const double min_edge_size,
const int min_num_particle,
const int num_particle)
{
    int id;
    // BUGFIX: start one past the current maximum id; the previous code
    // reused the last existing particle id for the first node created here
    // (the CreateParticles* routines already use Id() + 1).
    if (rLagrangianModelPart.Nodes().size() != 0)
        id = (rLagrangianModelPart.NodesEnd() - 1)->Id() + 1;
    else
        id = 1;

    const int nelements = rEulerianModelPart.Elements().size();

    BoundedMatrix<double, 10, 3 > coord;
    BoundedMatrix<double, 10, 4 > NN;
    for (int ne = 0; ne < nelements; ne++)
    {
        ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne;
        Geometry<Node < 3 > >& geom = ielem->GetGeometry();
        int n_ptr = int(ielem->GetValue(YOUNG_MODULUS));
        if( n_ptr < min_num_particle && CheckElemDist(geom,max_seed_distance) ) // reseed a poor near-surface element
        {
            // Deterministic seeding coordinates and shape functions.
            //RandomPariclePosition(geom, coord, NN);
            FewComputeGaussPointPositions3D(geom, coord, NN);
            int aux_n_ptr = n_ptr;
            int cnt = 0;
            while( aux_n_ptr < num_particle ){
                aux_n_ptr++;
                // Create the new particle at the next seed coordinate.
                int node_id = id++;
                Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2));

                array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
                noalias(vel) = ZeroVector(3);

                // Interpolate velocity and level-set distance at the seed.
                double p_distance = 0.0;
                for (unsigned int j = 0; j < TDim + 1; j++){
                    noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY);
                    p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE);
                }

                // Assign particle sign from the interpolated distance.
                if(p_distance < 0.0)
                    pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
                else if(p_distance > 0.0)
                    pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;

                pnode->Fix(IS_WATER);
                AssignParticleRadius(pnode,p_distance,min_edge_size);

                cnt++;
            }
        }
    }
}
// void ReseedOrDelete3D(ModelPart& rEulerianModelPart,
// ModelPart& rLagrangianModelPart,
// const double max_seed_distance,
// const double min_edge_size)
// {
// int id;
// if (rLagrangianModelPart.Nodes().size() != 0)
// id = (rLagrangianModelPart.NodesEnd() - 1)->Id();
// else
// id = 1;
// const int nelements = rEulerianModelPart.Elements().size();
// const int nparticles = rLagrangianModelPart.Nodes().size();
//
//
//
// BoundedMatrix<double, 56, 3 > coord;
// BoundedMatrix<double, 56, 4 > NN;
// // #pragma omp parallel for firstprivate(NN,coord)
// for (int ne = 0; ne < nelements; ne++)
// {
// ModelPart::ElementsContainerType::iterator ielem = rEulerianModelPart.ElementsBegin() + ne;
// Geometry<Node < 3 > >& geom = ielem->GetGeometry();
// int n_ptr = int(ielem->GetValue(YOUNG_MODULUS));
//
// if( n_ptr < 42 && CheckElemDist(geom,max_seed_distance) )//ressed in close to surface and poor element
// {
// //compute cooordinates
// //RandomPariclePosition(geom, coord, NN);
// ComputeGaussPointPositions3D(geom, coord, NN);
// int aux_n_ptr = n_ptr;
// int cnt = 0;
// while( aux_n_ptr<56 ){
// aux_n_ptr++;
// //COORDINATES
// int node_id = id++;
// Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, coord(cnt,0), coord(cnt,1), coord(cnt,2));
//
// array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
// noalias(vel) = ZeroVector(3);
//
// // double & p_dist = pnode->FastGetSolutionStepValue(DISTANCE);
// double p_distance = 0.0;
// for (unsigned int j = 0; j < TDim + 1; j++){
// noalias(vel) += NN(cnt,j) * geom[j].FastGetSolutionStepValue(VELOCITY);
// p_distance += NN(cnt,j) * geom[j].FastGetSolutionStepValue(DISTANCE);
// }
//
// // Assign particle sign
// if(p_distance < 0.0)
// pnode->FastGetSolutionStepValue(IS_WATER)=-1.0;
// else if(p_distance > 0.0)
// pnode->FastGetSolutionStepValue(IS_WATER)= 1.0;
//
// pnode->Fix(IS_WATER);
//
// AssignParticleRadius(pnode,p_distance,min_edge_size);
//
// cnt++;
// }
// }
// else if( n_ptr > 70 && CheckElemDist(geom,max_seed_distance) ){
// const int ele_id = ielem->Id();
// ModelPart::NodesContainerType element_particles;
// element_particles.reserve(64);
// //save particle list
// for (int kk = 0; kk < nparticles; kk++)
// {
// ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + kk;
//
// const int ptr_nest = iparticle->GetValue(NL_ITERATION_NUMBER);
// if( ptr_nest==ele_id )
// {
// iparticle->SetValue(SCALE, 0);
// element_particles.push_back( *(iparticle.base()) );
// }
// }
//
// //loop to order based on the radius
// ModelPart::NodesContainerType::iterator ptr_begin = element_particles.begin();
// unsigned int ptr_elem_size = element_particles.size();
//
// for(unsigned int ii=0; ii < ptr_elem_size; ii++)
// for(unsigned int jj=ii+1; jj < ptr_elem_size; jj++)
// {
// double ii_radi = (ptr_begin + ii)->FastGetSolutionStepValue(PARTICLE_RADIUS);
// double jj_radi = (ptr_begin + jj)->FastGetSolutionStepValue(PARTICLE_RADIUS);
//
// (ii_radi>=jj_radi) ? (ptr_begin + ii)->GetValue(SCALE)+=1 : (ptr_begin + jj)->GetValue(SCALE)+=1;
//
// }
// //delete extra nodes
// int aux_ptr_elem_size = int(ptr_elem_size);
// while(aux_ptr_elem_size>56)
// {
// for(unsigned int ii=0; ii < ptr_elem_size; ii++){
// bool swt = false;
// for( int kkk = ptr_elem_size; kkk>0; kkk-- )
// if( (ptr_begin + ii)->GetValue(SCALE) == kkk && (ptr_begin + ii)->GetValue(IS_VISITED) == 0){
// bool is_escaped = (ptr_begin + ii)->GetValue(IS_ESCAPED);
// if( is_escaped==false )
// (ptr_begin + ii)->Set(TO_ERASE,true);//CHECK ESCASPED NODES
// (ptr_begin + ii)->SetValue(IS_VISITED,1);
// swt = true;
// break;
// }
// if(swt )
// break;
// }
// aux_ptr_elem_size -= 1;
// }
// }
//
//
// }
//
// }
// Lays out 16 deterministic seed points inside a triangle using area
// coordinates: four rows at eta = 1/5, 2/5, 3/5, 4/5, holding 7, 5, 3 and 1
// points respectively. pos receives the Cartesian positions, N the matching
// shape-function (area-coordinate) values.
void ComputeGaussPointPositions2D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & pos, BoundedMatrix<double, 16, 3 > & N)
{
    int point = 0;
    double eta = 1.0 / 5.0;
    for (unsigned int row = 0; row < 4; row++)
    {
        double xi = 1.0 / 8.0;
        for (unsigned int col = 0; col < (7 - 2*row); col++)
        {
            const double w0 = xi;
            const double w1 = eta;
            const double w2 = 1.0 - xi - eta;
            pos(point, 0) = w0 * geom[0].X() + w1 * geom[1].X() + w2 * geom[2].X();
            pos(point, 1) = w0 * geom[0].Y() + w1 * geom[1].Y() + w2 * geom[2].Y();
            pos(point, 2) = w0 * geom[0].Z() + w1 * geom[1].Z() + w2 * geom[2].Z();
            N(point, 0) = w0;
            N(point, 1) = w1;
            N(point, 2) = w2;
            xi += 1.0 / 8.0;
            ++point;
        }
        eta += 1.0 / 5.0;
    }
}
// Lays out 56 deterministic seed points inside a tetrahedron using volume
// coordinates. The triple loop produces layers of triangular point grids:
// 21 + 15 + 10 + 6 + 3 + 1 = 56 points. Seeds start at close_point = 1/20
// from each face and step by 1/6.5 (despite the name "one_seventh").
void ComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 56, 3 > & pos, BoundedMatrix<double, 56, 4 > & N)
{
int pos_counter = 0;
// NOTE(review): named one_seventh but equals 1/6.5 — presumably tuned so
// the last point stays inside the element; confirm before changing.
const double one_seventh = 1.0/6.5;
double close_point = 1.0/20;
double zpos = close_point;
for (unsigned int kk = 0; kk < 6; kk++)
{
// double y_div = 1.0/(7 - kk);
double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh
for (unsigned int i = 0; i < (6-kk); i++)
{
// double x_div = 1.0/(7 - kk);// -i
double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh
for (unsigned int j = 0; j < (6-kk-i); j++)
{
// Volume coordinates of the seed; N4 completes the partition of unity.
double N1 = xpos;
double N2 = ypos;
double N3 = zpos;
double N4 = 1.0 - xpos - ypos - zpos;
pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X();
pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y();
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z();
N(pos_counter, 0) = N1;
N(pos_counter, 1) = N2;
N(pos_counter, 2) = N3;
N(pos_counter, 3) = N4;
xpos += one_seventh;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh
pos_counter += 1;
}
ypos += one_seventh;//y_div * (1.0 - zpos);//one_seventh
}
zpos += one_seventh;
}
}
// Sparse variant of ComputeGaussPointPositions3D: 10 deterministic seed
// points per tetrahedron (layers of 6 + 3 + 1 points). Seeds start at
// close_point = 1/20 and step by 1/2.5 (despite the name "one_third").
void FewComputeGaussPointPositions3D(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 10, 3 > & pos, BoundedMatrix<double, 10, 4 > & N)
{
int pos_counter = 0;
// NOTE(review): named one_third but equals 1/2.5 = 0.4 — presumably tuned
// so the last point stays inside the element; confirm before changing.
const double one_third = 1.0/2.5;
double close_point = 1.0/20;
double zpos = close_point;
for (unsigned int kk = 0; kk < 3; kk++)
{
// double y_div = 1.0/(7 - kk);
double ypos = close_point;//one_seventh;// y_div * (1.0 - zpos);//one_seventh
for (unsigned int i = 0; i < (3-kk); i++)
{
// double x_div = 1.0/(7 - kk);// -i
double xpos = close_point;//one_seventh;//x_div* (1.0 - ypos) * (1.0 - zpos);//one_seventh
for (unsigned int j = 0; j < (3-kk-i); j++)
{
// Volume coordinates of the seed; N4 completes the partition of unity.
double N1 = xpos;
double N2 = ypos;
double N3 = zpos;
double N4 = 1.0 - xpos - ypos - zpos;
pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X() + N4 * geom[3].X();
pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y() + N4 * geom[3].Y();
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z() + N4 * geom[3].Z();
N(pos_counter, 0) = N1;
N(pos_counter, 1) = N2;
N(pos_counter, 2) = N3;
N(pos_counter, 3) = N4;
xpos += one_third;//x_div * (1.0 - ypos) * (1.0 - zpos); //one_seventh
pos_counter += 1;
}
ypos += one_third;//y_div * (1.0 - zpos);//one_seventh
}
zpos += one_third;
}
}
// Draws 16 pseudo-random points inside a triangle (area coordinates built
// from two uniform draws) and returns both their Cartesian coordinates and
// their shape-function values. Uses rand(), so results depend on the global
// seed state.
void RandomPariclePosition(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 16, 3 > & coord, BoundedMatrix<double, 16, 3 > & N_shape)
{
    for(int ii=0;ii<16;ii++){
        double xi = rand()* ( 1.0 / ( RAND_MAX + 1.0 ) );
        double etta = (1.0 - xi) * ( rand()* ( 1.0 / ( RAND_MAX + 1.0 ) ) );
        double zetta = 1.0 - (xi + etta);
        coord(ii,0) = xi * geom[0].X() + etta * geom[1].X() + zetta * geom[2].X();
        coord(ii,1) = xi * geom[0].Y() + etta * geom[1].Y() + zetta * geom[2].Y();
        coord(ii,2) = xi * geom[0].Z() + etta * geom[1].Z() + zetta * geom[2].Z();
        N_shape(ii,0) = xi;
        N_shape(ii,1) = etta;
        // BUGFIX: was N_shape(ii,1) = zetta, which overwrote the second shape
        // function and left column 2 of N_shape uninitialized.
        N_shape(ii,2) = zetta;
    }
}
// Returns 1 when at least one node of the element lies inside the seeding
// band, i.e. |DISTANCE| < max_dist; returns 0 otherwise.
static int CheckElemDist(Geometry< Node < 3 > >& geom, const double max_dist)
{
    const unsigned int num_nodes = geom.size();
    for (unsigned int k = 0; k < num_nodes; ++k)
    {
        if (fabs(geom[k].FastGetSolutionStepValue(DISTANCE)) < max_dist)
            return 1;
    }
    return 0;
}
// A particle is "escaped" when the level-set value interpolated at its
// position has the opposite sign to the particle's own sign marker.
bool CheckIfEscaped(Geometry< Node < 3 > >& geom, const array_1d<double, 3 > & N_shape,const double particle_sign)
{
    double interpolated_dist = 0.0;
    for (unsigned int k = 0; k < geom.size(); ++k)
        interpolated_dist += N_shape[k] * geom[k].FastGetSolutionStepValue(DISTANCE);

    return interpolated_dist * particle_sign < 0.0;
}
// Sets PARTICLE_RADIUS to |p_dist| clamped into the admissible band
// [0.1*min_edge_size, 0.5*min_edge_size].
void AssignParticleRadius(Node < 3 > ::Pointer nd_ptr, double& p_dist,const double min_edge_size)
{
    const double abs_dist = fabs(p_dist);
    const double lower = 0.1 * min_edge_size;
    const double upper = 0.5 * min_edge_size;

    double radius = abs_dist;
    if (abs_dist < lower)
        radius = lower;
    else if (abs_dist > upper)
        radius = upper;

    nd_ptr->FastGetSolutionStepValue(PARTICLE_RADIUS) = radius;
}
// unsigned int time_seed()
// {
// time_t now = time ( 0 );
// unsigned char *p = (unsigned char *)&now;
// unsigned int seed = 0;
// size_t i;
//
// for ( i = 0; i < sizeof now; i++ )
// seed = seed * ( UCHAR_MAX + 2U ) + p[i];
//
// return seed;
// }
};
}
#endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
|
dftcommon.c | // Copyright Naoki Shibata and contributors 2010 - 2020.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#include <inttypes.h>
#include <assert.h>
#if defined(POWER64_UNDEF_USE_EXTERN_INLINES)
// This is a workaround required to cross compile for PPC64 binaries
#include <features.h>
#ifdef __USE_EXTERN_INLINES
#undef __USE_EXTERN_INLINES
#endif
#endif
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "sleef.h"
#define IMPORT_IS_EXPORT
#include "sleefdft.h"
#include "dispatchparam.h"
#include "dftcommon.h"
#include "common.h"
#include "arraymap.h"
#define MAGIC_FLOAT 0x31415926
#define MAGIC_DOUBLE 0x27182818
#define MAGIC_LONGDOUBLE 0x14142135
#define MAGIC_QUAD 0x33166247
#define MAGIC2D_FLOAT 0x22360679
#define MAGIC2D_DOUBLE 0x17320508
#define MAGIC2D_LONGDOUBLE 0x26457513
#define MAGIC2D_QUAD 0x36055512
const char *configStr[] = { "ST", "ST stream", "MT", "MT stream" };
// Parses a transform-path string such as "4(MT) 3 2(ST stream)" into
// butterfly widths (path[]) and configurations (config[], index into
// configStr). Returns the number of entries parsed, or a negative error:
// -1 unexpected character, -2 path too long, -3 unknown config name,
// -4 missing ')', -5 widths do not sum to log2len, -6 width > MAXBUTWIDTH.
static int parsePathStr(char *p, int *path, int *config, int pathLenMax, int log2len) {
  int pathLen = 0, l2l = 0;

  for(;;) {
    while(*p == ' ') p++;
    if (*p == '\0') break;

    // Cast to unsigned char: passing a plain (possibly negative) char to
    // the <ctype.h> functions is undefined behavior (CERT STR37-C).
    if (!isdigit((unsigned char)*p)) return -1;

    pathLen++;
    if (pathLen >= pathLenMax) return -2;

    int n = 0;
    while(isdigit((unsigned char)*p)) n = n * 10 + *p++ - '0';
    if (n > MAXBUTWIDTH) return -6;

    path[pathLen-1] = n;
    l2l += n;
    config[pathLen-1] = 0;

    if (*p != '(') continue;

    // Scan configs from index 3 down so "MT stream"/"ST stream" match
    // before their "MT"/"ST" prefixes.
    int c;
    for(c=3;c>=0;c--) if (strncmp(p+1, configStr[c], strlen(configStr[c])) == 0) break;
    if (c == -1) return -3;
    p += strlen(configStr[c]) + 1;
    if (*p != ')') return -4;
    p++;
    config[pathLen-1] = c;
  }

  if (l2l != log2len) return -5;

  return pathLen;
}
// Overrides the DFT execution plan with a user-supplied path string
// (e.g. "4(MT) 3 2"). On parse failure the current plan is kept and, in
// verbose mode, the error code is printed. The parsed widths are laid out
// in p->bestPath indexed by the transform level they start at.
EXPORT void SleefDFT_setPath(SleefDFT *p, char *pathStr) {
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
int path[32], config[32];
int pathLen = parsePathStr(pathStr, path, config, 31, p->log2len);
// Negative pathLen is a parse error code from parsePathStr; bail out.
if (pathLen < 0) {
if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Error %d in parsing path string : %s\n", pathLen, pathStr);
return;
}
// Distribute the parsed widths over the levels: entry j covers "path[j]"
// levels starting at "level", which decreases as widths are consumed.
for(uint32_t j = 0;j <= p->log2len;j++) p->bestPath[j] = 0;
for(int level = p->log2len, j=0;level > 0 && j < pathLen;) {
p->bestPath[level] = path[j];
p->bestPathConfig[level] = config[j];
level -= path[j];
j++;
}
// Recount the non-zero entries to get the effective path length.
p->pathLen = 0;
for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;
if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
printf("Set path : ");
for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
printf("\n");
}
}
// Releases every twiddle-factor table (one per butterfly width and level)
// together with the per-width pointer arrays, nulling the latter.
void freeTables(SleefDFT *p) {
  for(int width = 1; width <= MAXBUTWIDTH; width++) {
    for(uint32_t level = width; level <= p->log2len; level++) {
      Sleef_free(p->tbl[width][level]);
    }

    free(p->tbl[width]);
    p->tbl[width] = NULL;
  }
}
// Destroys a SleefDFT plan. 2D plans (MAGIC2D_*) free their transpose buffer
// and recursively dispose the horizontal/vertical 1D sub-plans (which are the
// same object when hlen == vlen). 1D plans free the real-transform
// coefficients, the per-level permutation tables and the twiddle tables.
// The magic field is cleared before free() to catch double-dispose.
EXPORT void SleefDFT_dispose(SleefDFT *p) {
// 2D instance: free its resources and recurse into the 1D sub-plans.
if (p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)) {
Sleef_free(p->tBuf);
SleefDFT_dispose(p->instH);
if (p->hlen != p->vlen) SleefDFT_dispose(p->instV);
p->magic = 0;
free(p);
return;
}
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
// Tiny transforms allocate no tables; only the struct itself is freed.
if (p->log2len <= 1) {
p->magic = 0;
free(p);
return;
}
// Real transforms carry two extra coefficient arrays.
if ((p->mode & SLEEF_MODE_REAL) != 0) {
Sleef_free(p->rtCoef1);
Sleef_free(p->rtCoef0);
p->rtCoef0 = p->rtCoef1 = NULL;
}
for(int level = p->log2len;level >= 1;level--) {
Sleef_free(p->perm[level]);
}
free(p->perm);
p->perm = NULL;
freeTables(p);
p->magic = 0;
free(p);
}
// Floor of the base-2 logarithm of q, branch-light: one 16-bit range test,
// then a 4-entry-per-nibble lookup table locates the highest set nibble and
// a second lookup resolves the exact bit. q is expected to be nonzero.
uint32_t ilog2(uint32_t q) {
  static const uint32_t nibble_log[] = {0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};

  const uint32_t half_shift = (q & 0xffff0000) ? 16 : 0;
  const uint32_t lo = q >> half_shift;

  uint32_t spread = lo | (lo >> 1);
  spread |= spread >> 2;

  const uint32_t sel = ((spread & 0x10) >> 4) | ((spread & 0x100) >> 7) | ((spread & 0x1000) >> 10);
  const uint32_t nibble_bits = nibble_log[sel] * 4;

  return half_shift + nibble_bits + nibble_log[lo >> nibble_bits] - 1;
}
//
// Global planner state shared by all SleefDFT instances.
char *dftPlanFilePath = NULL; // plan persistence file (heap-owned, may be NULL)
char *archID = NULL; // CPU/architecture identifier used as a plan-file key
uint64_t planMode = SLEEF_PLAN_REFERTOENVVAR; // SLEEF_PLAN_* flag bits
ArrayMap *planMap = NULL; // in-memory key/value store of measured plans
int planFilePathSet = 0, planFileLoaded = 0; // lazy-init bookkeeping
#ifdef _OPENMP
omp_lock_t planMapLock; // guards planMap and the load/save routines
int planMapLockInitialized = 0;
#endif
// Lazily create the lock that serializes access to the global plan map.
// The omp critical section makes concurrent first calls safe; without
// OpenMP this is a no-op and callers are assumed single-threaded.
static void initPlanMapLock() {
#ifdef _OPENMP
#pragma omp critical
{
if (!planMapLockInitialized) {
planMapLockInitialized = 1;
omp_init_lock(&planMapLock);
}
}
#endif
}
// Drop the in-memory plan map (if any) and reset the global pointer.
static void planMap_clear() {
  if (planMap != NULL) {
    ArrayMap_dispose(planMap);
    planMap = NULL;
  }
}
// Configure where measured plans are stored, the architecture key used
// inside the plan file, and the SLEEF_PLAN_* mode flags.
// path == NULL unsets the plan file; arch == NULL selects the CPU id string.
// SLEEF_PLAN_RESET additionally discards plans already loaded in memory.
EXPORT void SleefDFT_setPlanFilePath(const char *path, const char *arch, uint64_t mode) {
  initPlanMapLock();

  if ((mode & SLEEF_PLAN_RESET) != 0) {
    planMap_clear();
    planFileLoaded = 0;
    planFilePathSet = 0;
  }

  if (dftPlanFilePath != NULL) free(dftPlanFilePath);
  dftPlanFilePath = NULL;
  if (path != NULL) {
    dftPlanFilePath = malloc(strlen(path)+10); // original over-allocation kept
    // BUG FIX: malloc result was passed to strcpy unchecked.
    if (dftPlanFilePath != NULL) strcpy(dftPlanFilePath, path);
  }

  if (archID != NULL) free(archID);
  if (arch == NULL) arch = Sleef_getCpuIdString();
  archID = malloc(strlen(arch)+10);
  // BUG FIX: malloc result was passed to strcpy unchecked.
  if (archID != NULL) strcpy(archID, arch);

  planMode = mode;
  planFilePathSet = 1;
}
// Populate the global planMap, loading it from the configured plan file
// (set explicitly or via the ENVVAR environment variable). Always leaves
// planMap usable: falls back to an empty map on any failure.
// Caller must hold planMapLock.
static void loadPlanFromFile() {
  if (planFilePathSet == 0 && (planMode & SLEEF_PLAN_REFERTOENVVAR) != 0) {
    char *s = getenv(ENVVAR);
    if (s != NULL) SleefDFT_setPlanFilePath(s, NULL, planMode);
  }
  if (planMap != NULL) {
    ArrayMap_dispose(planMap);
    // BUG FIX: planMap was left dangling here; if the reload branch below is
    // skipped, the NULL check would not fire and the freed map would be used.
    planMap = NULL;
  }
  if (dftPlanFilePath != NULL && (planMode & SLEEF_PLAN_RESET) == 0) {
    planMap = ArrayMap_load(dftPlanFilePath, archID, PLANFILEID, (planMode & SLEEF_PLAN_NOLOCK) == 0);
  }
  if (planMap == NULL) planMap = initArrayMap();
  planFileLoaded = 1;
}
// Persist the in-memory plan map, unless planning is read-only or no
// destination file has been configured. Caller must hold planMapLock.
static void savePlanToFile() {
  assert(planFileLoaded);
  int writable = (planMode & SLEEF_PLAN_READONLY) == 0;
  if (writable && dftPlanFilePath != NULL) {
    ArrayMap_save(planMap, dftPlanFilePath, archID, PLANFILEID);
  }
}
// Bit widths of the fields packed into the 64-bit plan-map keys below.
#define CATBIT 8
#define BASETYPEIDBIT 2
#define LOG2LENBIT 8
#define DIRBIT 1
#define BUTSTATBIT 16
// Build the plan-map key for a butterfly-statistics entry (category 0).
// Fields, most- to least-significant: butStat | log2len | dir | baseTypeID | cat.
static uint64_t keyButStat(int baseTypeID, int log2len, int dir, int butStat) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0; // normalize to 1 = forward
  int cat = 0;
  uint64_t k = 0;
  k = (k << BUTSTATBIT) | (butStat & ~(~(uint64_t)0 << BUTSTATBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // BUG FIX: dir was masked with the LOG2LENBIT-wide mask (copy-paste); use
  // the DIRBIT-wide mask matching the 1-bit shift. Harmless today since dir
  // is 0/1, but a wider dir would have corrupted the adjacent key fields.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
// Additional field widths used by the path and transpose keys.
#define LEVELBIT LOG2LENBIT
#define BUTCONFIGBIT 8
#define TRANSCONFIGBIT 8
// Build the plan-map key for a 2D-transpose timing entry (category 2).
// hlen/vlen are order-normalized so (h,v) and (v,h) share the same key.
static uint64_t keyTrans(int baseTypeID, int hlen, int vlen, int transConfig) {
int max = MAX(hlen, vlen), min = MIN(hlen, vlen);
int cat = 2;
uint64_t k = 0;
k = (k << TRANSCONFIGBIT) | (transConfig & ~(~(uint64_t)0 << TRANSCONFIGBIT));
k = (k << LOG2LENBIT) | (max & ~(~(uint64_t)0 << LOG2LENBIT));
k = (k << LOG2LENBIT) | (min & ~(~(uint64_t)0 << LOG2LENBIT));
k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
return k;
}
// Build the plan-map key for one step of the best path (category 3).
// Fields: config | level | log2len | dir | baseTypeID | cat.
static uint64_t keyPath(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0; // normalize to 1 = forward
  int cat = 3;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // BUG FIX: dir was masked with the LOG2LENBIT-wide mask (copy-paste);
  // the mask must match the 1-bit DIRBIT shift.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
// Build the plan-map key for one step's config of the best path (category 4).
// Same layout as keyPath, differing only in the category field.
static uint64_t keyPathConfig(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0; // normalize to 1 = forward
  int cat = 4;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // BUG FIX: dir was masked with the LOG2LENBIT-wide mask (copy-paste);
  // the mask must match the 1-bit DIRBIT shift.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
// Fetch the value stored under key and parse it as hexadecimal.
// Returns 0 when the key is absent or the stored string does not parse.
static uint64_t planMap_getU64(uint64_t key) {
  const char *s = ArrayMap_get(planMap, key);
  uint64_t v = 0;
  if (s == NULL || sscanf(s, "%" SCNx64, &v) != 1) return 0;
  return v;
}
// Store value under key as a hexadecimal string. The map takes ownership of
// the new string; any string previously stored under key is freed here.
static void planMap_putU64(uint64_t key, uint64_t value) {
  char *s = malloc(100);
  if (s == NULL) return; // BUG FIX: malloc result was used unchecked
  // snprintf: bounded write, identical output to the old sprintf
  snprintf(s, 100, "%" PRIx64, value);
  s = ArrayMap_put(planMap, key, s);
  if (s != NULL) free(s); // free the displaced previous value
}
// Load a previously measured best path for this plan from the plan map.
// Returns 1 on success, 0 when no valid entry exists for pathCat.
// Takes planMapLock for the duration of the lookup.
int PlanManager_loadMeasurementResultsP(SleefDFT *p, int pathCat) {
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
initPlanMapLock();
#ifdef _OPENMP
omp_set_lock(&planMapLock);
#endif
if (!planFileLoaded) loadPlanFromFile();
// The butStat entry at pathCat+10 acts as a "results present" marker.
int stat = planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10));
if (stat == 0) {
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
return 0;
}
int ret = 1;
for(int j = p->log2len;j >= 0;j--) {
p->bestPath[j] = planMap_getU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat));
p->bestPathConfig[j] = planMap_getU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat));
// Reject corrupt entries (width beyond what this build supports).
if (p->bestPath[j] > MAXBUTWIDTH) ret = 0;
}
p->pathLen = 0;
for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
return ret;
}
// Record this plan's measured best path in the plan map (and persist it to
// the plan file unless planning is read-only). Existing results for the same
// pathCat are kept as-is; first writer wins.
// Takes planMapLock for the duration of the update.
void PlanManager_saveMeasurementResultsP(SleefDFT *p, int pathCat) {
assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
initPlanMapLock();
#ifdef _OPENMP
omp_set_lock(&planMapLock);
#endif
if (!planFileLoaded) loadPlanFromFile();
// Another thread/process may already have stored results; keep them.
if (planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)) != 0) {
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
return;
}
for(int j = p->log2len;j >= 0;j--) {
planMap_putU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPath[j]);
planMap_putU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPathConfig[j]);
}
// Mark the results as present (see the load side for the matching check).
planMap_putU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10), 1);
if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();
#ifdef _OPENMP
omp_unset_lock(&planMapLock);
#endif
}
// Load the measured 2D-transpose timings (without and with multithreading)
// from the plan map into the plan. Returns nonzero when a single-threaded
// timing was found. Takes planMapLock for the duration of the lookup.
int PlanManager_loadMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));
  initPlanMapLock();
  // FIX: removed the unused local `ret` (declared, never read or returned).
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  p->tmNoMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0));
  p->tmMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1));
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return p->tmNoMT != 0;
}
// Store the measured 2D-transpose timings in the plan map and persist them
// unless planning is read-only. Takes planMapLock for the duration.
void PlanManager_saveMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));
  initPlanMapLock();
  // FIX: removed the unused local `ret` (declared, never read or returned).
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0), p->tmNoMT);
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1), p->tmMT );
  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}
|
static_linking.c | // RUN: %libomptarget-compile-generic -DLIBRARY -c -o %t.o
// RUN: llvm-ar rcs %t.a %t.o
// RUN: %libomptarget-compile-generic %t.a && %libomptarget-run-generic 2>&1 | %fcheck-generic
// REQUIRES: nvptx64-nvidia-cuda-oldDriver
// REQUIRES: amdgcn-amd-amdhsa-oldDriver
#ifdef LIBRARY
int x = 42;
#pragma omp declare target(x)
// Runs a target region that reads the device copy of the declare-target
// global x. Returns the value observed on the device (42 when the
// declare-target mapping from the static library works).
int foo() {
int value;
#pragma omp target map(from : value)
value = x;
return value;
}
#else
#include <stdio.h>
int foo();
int main() {
// foo() is provided by the LIBRARY half of this test, linked in as a
// static archive (see the RUN lines at the top of the file).
int x = foo();
// CHECK: PASS
if (x == 42)
printf("PASS\n");
}
#endif
|
GB_binop__ne_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fp32)
// A*D function (colscale): GB (_AxD__ne_fp32)
// D*A function (rowscale): GB (_DxB__ne_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fp32)
// C=scalar+B GB (_bind1st__ne_fp32)
// C=scalar+B' GB (_bind1st_tran__ne_fp32)
// C=A+scalar GB (_bind2nd__ne_fp32)
// C=A'+scalar GB (_bind2nd_tran__ne_fp32)
// C type: bool
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// FIX: the definitions above/below previously ended in a stray trailing
// backslash, which spliced the following comment line into the macro body.
// The expansion was unchanged (comments are stripped), but the continuation
// was fragile and hid the comment from the preprocessor output.
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_FP32 || GxB_NO_NE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator, no typecasting.
// The loop body lives in the included template, specialized for the
// NE_FP32 operator by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// For NE_FP32 the accumulation body is compiled out (#if 0): NE is not a
// valid dense-accum operator here, so this specialization is a stub that
// reports success (or GrB_NO_VALUE when the operator is disabled).
GrB_Info GB (_Cdense_accumB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// As with _Cdense_accumB above, the body is compiled out (#if 0) for the
// NE operator; this stub exists to satisfy the generated dispatch table.
GrB_Info GB (_Cdense_accumb__ne_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is referenced by the included template.
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is referenced by the included template.
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B for the NE_FP32 operator.
// The full add logic (masking, task scheduling, sparsity cases) comes from
// GB_add_template.c, parameterized by the GB_* macros above.
GrB_Info GB (_AaddB__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// NOTE(review): alpha/beta are initialized only for eWiseUnion; presumably
// the template reads them only in that case — confirm in GB_add_template.c.
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B bitmap/full.
// GB_BINOP_FLIP is 0 for NE (commutative), so only the unflipped template
// instantiation below is compiled in.
GrB_Info GB (_AemultB_02__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is stored as a bitmap.
GrB_Info GB (_AemultB_bitmap__ne_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B: apply the operator
// with the scalar bound as the first operand. Cx and Bx may be aliased;
// positions where the bitmap Bb is zero are skipped.
GrB_Info GB (_bind1st__ne_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *out = (bool *) Cx_output ;
float xval = (*((float *) x_input)) ;
float *Bvals = (float *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
if (GBB (Bb, k))
{
float bk = GBX (Bvals, k, false) ;
out [k] = (xval != bk) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A: apply the operator
// with the scalar bound as the second operand. Cx and Ax may be aliased;
// positions where the bitmap Ab is zero are skipped.
GrB_Info GB (_bind2nd__ne_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *out = (bool *) Cx_output ;
float *Avals = (float *) Ax_input ;
float yval = (*((float *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (GBB (Ab, k))
{
float ak = GBX (Avals, k, false) ;
out [k] = (ak != yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry body consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the bind-1st operator.
GrB_Info GB (_bind1st_tran__ne_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE after the template include (generated boilerplate).
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry body consumed by GB_unop_transpose.c.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the bind-2nd operator.
GrB_Info GB (_bind2nd_tran__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cpbsv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpbsv.c, normal z -> c, Fri Sep 28 17:38:08 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_pbsv
*
* Computes the solution to a system of linear equations A * X = B,
* where A is an n-by-n Hermitian positive definite band matrix, and X and B
* are n-by-nrhs matrices. The Cholesky decomposition is used to factor A as
*
* \f[ A = L\times L^H, \f] if uplo = PlasmaLower,
* or
* \f[ A = U^H\times U, \f] if uplo = PlasmaUpper,
*
* where U is an upper triangular matrix and L is a lower triangular matrix.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] kd
*          The number of superdiagonals within the band of A if
*          uplo = PlasmaUpper, or the number of subdiagonals if
*          uplo = PlasmaLower. kd >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns
* of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* On entry, the upper or lower triangle of the Hermitian band
* matrix A, stored in the first KD+1 rows of the array. The
* j-th column of A is stored in the j-th column of the array AB
* as follows:
* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j;
* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j <= i <= min(n,j+kd).
* \n
* On exit, if INFO = 0, the triangular factor U or L from the
* Cholesky factorization A = U^H*U or A = L*L^H of the band
* matrix A, in the same storage format as A.
*
* @param[in] ldab
* The leading dimension of the array AB. ldab >= max(1,n).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
*******************************************************************************
*
* @sa plasma_omp_cpbsv
* @sa plasma_cpbsv
* @sa plasma_dpbsv
* @sa plasma_spbsv
*
******************************************************************************/
// LAPACK-layout driver: validate arguments, convert AB and B to tile
// layout, factor and solve asynchronously, then convert the results back.
int plasma_cpbsv(plasma_enum_t uplo,
                 int n, int kd, int nrhs,
                 plasma_complex32_t *pAB, int ldab,
                 plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments (return value encodes the argument position).
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (ldab < kd+1) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }
    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_pbtrf(plasma, PlasmaComplexFloat, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Initialize tile matrix descriptors.
    int lm = nb*(1+(kd+nb-1)/nb);
    plasma_desc_t AB;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexFloat, uplo, nb, nb,
                                             lm, n, 0, 0, n, n, kd, kd, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        ldb, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // BUG FIX: the result of plasma_sequence_init() was assigned but ignored.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // BUG FIX: the result of plasma_request_init() was assigned but ignored.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
        // Call the tile async function.
        plasma_omp_cpbsv(uplo, AB, B, &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_cdesc2pb(AB, pAB, ldab, &sequence, &request);
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization
    // Free matrices in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_pbsv
*
* Solves a Hermitian positive definite band system of linear equations
* using Cholesky factorization.
* Non-blocking tile version of plasma_cpbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[in,out] B
* Descriptor of right-hand-sides B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_cpbsv
* @sa plasma_omp_cpbsv
* @sa plasma_omp_dpbsv
* @sa plasma_omp_spbsv
*
******************************************************************************/
// Tile-async solve: Cholesky factorization of the band matrix AB followed
// by the two triangular band solves. Errors are reported through the
// sequence/request pair; no value is returned.
void plasma_omp_cpbsv(plasma_enum_t uplo, plasma_desc_t AB, plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        // BUG FIX: this branch returned without flagging the request, so the
        // caller's sequence/request kept reporting success on an illegal uplo;
        // every other argument check here fails the request.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (AB.n == 0 || B.n == 0)
        return;
    // Call the parallel functions: factor A = U^H*U or L*L^H, then solve
    // the two triangular band systems against B.
    plasma_pcpbtrf(uplo, AB, sequence, request);
    plasma_pctbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);
    plasma_pctbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);
}
|
sparse-false.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include <omp.h>
#define EPSILON 1.0E-6
unsigned int bots_arg_size = 50;
unsigned int bots_arg_size_1 = 80;
#define TRUE 1
#define FALSE 0
#define BOTS_RESULT_SUCCESSFUL 1
#define BOTS_RESULT_UNSUCCESSFUL 0
#define USE_FALSE 3
unsigned int FALSE_ARRAY[16];
/***********************************************************************
* checkmat:
**********************************************************************/
/***********************************************************************
 * checkmat: entrywise comparison of blocks M and N by relative error.
 * Returns TRUE when every entry agrees within EPSILON, FALSE otherwise.
 **********************************************************************/
int checkmat (float *M, float *N)
{
   int i, j;
   float r_err;
   int bad = 0;
   for (i = 0; i < bots_arg_size_1; i++)
   {
      for (j = 0; j < bots_arg_size_1; j++)
      {
         /* |M - N| via fabsf instead of the manual negate idiom */
         r_err = fabsf(M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j]);
         /* BUG FIX: divide by |M|, not M; with a negative reference entry
          * the relative error came out negative and a mismatch was never
          * flagged (negative > EPSILON is always false). */
         r_err = r_err / fabsf(M[i*bots_arg_size_1+j]);
         if(r_err > EPSILON)
         {
            fprintf(stderr,"Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
                    i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
            bad = 1;
         }
      }
   }
   return bad ? FALSE : TRUE;
}
/***********************************************************************
* genmat:
**********************************************************************/
/***********************************************************************
 * genmat: builds the sparse block structure of the test matrix and
 * fills each allocated block with a deterministic pseudo-random
 * sequence (LCG with multiplier 3125 mod 65536, seeded at 1325).
 **********************************************************************/
void genmat (float *M[])
{
   int seed = 1325;
   int ii, jj;
   for (ii = 0; ii < bots_arg_size; ii++)
   {
      for (jj = 0; jj < bots_arg_size; jj++)
      {
         /* Decide whether block (ii,jj) is structurally zero. The block
            tridiagonal always exists; elsewhere a block is dropped when
            it lies on an odd row/column or fails the mod-3 pattern. */
         int is_null;
         if (ii == jj || ii == jj-1 || ii-1 == jj)
            is_null = FALSE;
         else if ( ((ii < jj) && (ii % 3 != 0)) ||
                   ((ii > jj) && (jj % 3 != 0)) ||
                   (ii % 2 == 1) || (jj % 2 == 1) )
            is_null = TRUE;
         else
            is_null = FALSE;
         if (is_null)
         {
            M[ii*bots_arg_size+jj] = NULL;
            continue;
         }
         /* Allocate and populate the dense block. */
         float *block = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
         if (block == NULL)
         {
            fprintf(stderr,"Error: Out of memory\n");
            exit(101);
         }
         M[ii*bots_arg_size+jj] = block;
         {
            int k;
            const int total = bots_arg_size_1*bots_arg_size_1;
            for (k = 0; k < total; k++)
            {
               seed = (3125 * seed) % 65536;    /* LCG state carries across blocks */
               block[k] = (float)((seed - 32768.0) / 16384.0);
            }
         }
      }
   }
}
/***********************************************************************
* print_structure:
**********************************************************************/
/***********************************************************************
 * print_structure: draws the block sparsity pattern of M on stderr,
 * one character per block ('x' = present, ' ' = structurally zero).
 **********************************************************************/
void print_structure(char *name, float *M[])
{
   int row, col;
   fprintf(stderr,"Structure for matrix %s @ 0x%p\n",name, M);
   for (row = 0; row < bots_arg_size; row++)
   {
      for (col = 0; col < bots_arg_size; col++)
      {
         if (M[row*bots_arg_size+col] != NULL)
            fprintf(stderr,"x");
         else
            fprintf(stderr," ");
      }
      fprintf(stderr,"\n");
   }
   fprintf(stderr,"\n");
}
/***********************************************************************
* allocate_clean_block:
**********************************************************************/
/***********************************************************************
 * allocate_clean_block: returns a freshly allocated, zero-initialized
 * bots_arg_size_1 x bots_arg_size_1 block; aborts with exit(101) on
 * out-of-memory, matching genmat's error handling.
 **********************************************************************/
float * allocate_clean_block()
{
   /* calloc zero-fills the buffer (an all-zero bit pattern is 0.0f for
      IEEE-754 floats), replacing the original malloc + explicit double
      zeroing loop. */
   float *p = (float *) calloc((size_t)bots_arg_size_1 * bots_arg_size_1, sizeof(float));
   if (p == NULL)
   {
      fprintf(stderr,"Error: Out of memory\n");
      exit (101);
   }
   return p;
}
/***********************************************************************
* lu0:
**********************************************************************/
/***********************************************************************
 * lu0: in-place unblocked LU factorization (no pivoting) of a dense
 * bots_arg_size_1 x bots_arg_size_1 diagonal block.
 **********************************************************************/
void lu0(float *diag)
{
   int i, j, k;
   const int S = bots_arg_size_1;
   for (k = 0; k < S; k++)
   {
      float *pivot_row = diag + k*S;
      for (i = k+1; i < S; i++)
      {
         float *row = diag + i*S;
         row[k] = row[k] / pivot_row[k];          /* multiplier (L entry) */
         for (j = k+1; j < S; j++)
            row[j] = row[j] - row[k] * pivot_row[j];
      }
   }
}
/***********************************************************************
* bdiv:
**********************************************************************/
/***********************************************************************
 * bdiv: solves row := row * U^{-1} against the already-factorized
 * diagonal block (back substitution applied row by row).
 **********************************************************************/
void bdiv(float *diag, float *row)
{
   int i, j, k;
   const int S = bots_arg_size_1;
   for (i = 0; i < S; i++)
   {
      float *r = row + i*S;
      for (k = 0; k < S; k++)
      {
         r[k] = r[k] / diag[k*S+k];
         for (j = k+1; j < S; j++)
            r[j] = r[j] - r[k]*diag[k*S+j];
      }
   }
}
/***********************************************************************
* bmod:
**********************************************************************/
/***********************************************************************
 * bmod: Schur-complement update  inner -= row * col  for one interior
 * block (triple loop over bots_arg_size_1).
 **********************************************************************/
void bmod(float *row, float *col, float *inner)
{
   int i, j, k;
   const int S = bots_arg_size_1;
   for (i = 0; i < S; i++)
   {
      for (j = 0; j < S; j++)
      {
         float acc = inner[i*S+j];
         for (k = 0; k < S; k++)
            acc = acc - row[i*S+k]*col[k*S+j];   /* same single-precision op order as before */
         inner[i*S+j] = acc;
      }
   }
}
/***********************************************************************
* fwd:
**********************************************************************/
/***********************************************************************
 * fwd: forward substitution  col := L^{-1} * col  against the
 * already-factorized diagonal block, one column at a time.
 **********************************************************************/
void fwd(float *diag, float *col)
{
   int i, j, k;
   const int S = bots_arg_size_1;
   for (j = 0; j < S; j++)
   {
      for (k = 0; k < S; k++)
      {
         /* col[k*S+j] is final once i > k, so it can be read once here */
         const float ckj = col[k*S+j];
         for (i = k+1; i < S; i++)
            col[i*S+j] = col[i*S+j] - diag[i*S+k]*ckj;
      }
   }
}
/***********************************************************************
 * sparselu_init: allocates the bots_arg_size x bots_arg_size array of
 * block pointers, generates the test matrix and prints its structure.
 **********************************************************************/
void sparselu_init (float ***pBENCH, char *pass)
{
   *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *));
   /* FIX: check the allocation here (consistent with genmat's OOM
      handling) instead of handing a NULL pointer to genmat and
      crashing there. */
   if (*pBENCH == NULL)
   {
      fprintf(stderr,"Error: Out of memory\n");
      exit(101);
   }
   genmat(*pBENCH);
   print_structure(pass, *pBENCH);
}
/***********************************************************************
 * sparselu_par_call: task-parallel blocked LU factorization of BENCH.
 * For each kernel launch (lu0 / fwd / bdiv / bmod) one of three
 * equivalent depend-clause variants is chosen at random (plain, or
 * with an extra in:/inout: dependence on the dummy FALSE_ARRAY) to
 * exercise the OpenMP dependence resolver. All tasks are generated by
 * a single thread; #pragma omp taskwait synchronizes before timing.
 **********************************************************************/
void sparselu_par_call(float **BENCH)
{
   int ii, jj, kk;
   srand(10);
   fprintf(stderr,"Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ",
           bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1);
#pragma omp parallel
#pragma omp single
   {
   double d1 = omp_get_wtime();
   for (kk=0; kk<bots_arg_size; kk++)
   {
      int rnd = rand()%USE_FALSE;
      /* FIX: the first variant's depend clause had a stray trailing comma
         (depend(inout:...,)), which is a syntax error. Braces were also
         added around each task construct so it is unambiguously the body
         of its `if` (a bare pragma as an if-body is not valid C). */
      if (rnd == 0)
      {
#pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk])
         lu0(BENCH[kk*bots_arg_size+kk]);
      }
      if (rnd == 1)
      {
#pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk],FALSE_ARRAY[0])
         lu0(BENCH[kk*bots_arg_size+kk]);
      }
      if (rnd == 2)
      {
#pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk]) depend(in:FALSE_ARRAY[0])
         lu0(BENCH[kk*bots_arg_size+kk]);
      }
      for (jj=kk+1; jj<bots_arg_size; jj++)
         if (BENCH[kk*bots_arg_size+jj] != NULL)
         {
            int rnd = rand()%USE_FALSE;
            if (rnd == 0)
            {
#pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj]) depend(inout:BENCH[kk*bots_arg_size+kk])
               fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
            }
            if (rnd == 1)
            {
#pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],FALSE_ARRAY[0]) depend(inout:BENCH[kk*bots_arg_size+kk])
               fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
            }
            if (rnd == 2)
            {
#pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj]) depend(inout:BENCH[kk*bots_arg_size+kk],FALSE_ARRAY[0])
               fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]);
            }
         }
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
         {
            int rnd = rand()%USE_FALSE;
            if (rnd == 0)
            {
#pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk]) depend(in:BENCH[kk*bots_arg_size+kk])
               bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
            }
            if (rnd == 1)
            {
#pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk]) depend(in:BENCH[kk*bots_arg_size+kk],FALSE_ARRAY[0])
               bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
            }
            if (rnd == 2)
            {
#pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk],FALSE_ARRAY[0]) depend(in:BENCH[kk*bots_arg_size+kk])
               bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]);
            }
         }
      for (ii=kk+1; ii<bots_arg_size; ii++)
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            for (jj=kk+1; jj<bots_arg_size; jj++)
               if (BENCH[kk*bots_arg_size+jj] != NULL)
               {
                  int rnd = rand()%USE_FALSE;
                  /* fill-in block is allocated by the generating thread,
                     before the task that writes it is created */
                  if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
                  if (rnd == 0)
                  {
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk]) depend(inout:BENCH[ii*bots_arg_size+jj])
                     bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                  }
                  if (rnd == 1)
                  {
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk],FALSE_ARRAY[0]) depend(inout:BENCH[ii*bots_arg_size+jj])
                     bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                  }
                  if (rnd == 2)
                  {
#pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk]) depend(inout:BENCH[ii*bots_arg_size+jj],FALSE_ARRAY[0])
                     bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
                  }
               }
   }
#pragma omp taskwait
   double d2 = omp_get_wtime();
   fprintf(stderr," Par Time: %f\n",d2-d1);
   }
   fprintf(stderr," completed!\n");
}
/***********************************************************************
 * sparselu_seq_call: sequential reference LU factorization over the
 * same blocked layout (lu0 on the diagonal, fwd/bdiv on the border,
 * bmod for the Schur updates, allocating fill-in blocks on demand).
 **********************************************************************/
void sparselu_seq_call(float **BENCH)
{
   int ii, jj, kk;
   double t_start = omp_get_wtime();
   for (kk=0; kk<bots_arg_size; kk++)
   {
      float *diag = BENCH[kk*bots_arg_size+kk];
      lu0(diag);
      for (jj=kk+1; jj<bots_arg_size; jj++)
      {
         if (BENCH[kk*bots_arg_size+jj] != NULL)
            fwd(diag, BENCH[kk*bots_arg_size+jj]);
      }
      for (ii=kk+1; ii<bots_arg_size; ii++)
      {
         if (BENCH[ii*bots_arg_size+kk] != NULL)
            bdiv (diag, BENCH[ii*bots_arg_size+kk]);
      }
      for (ii=kk+1; ii<bots_arg_size; ii++)
      {
         if (BENCH[ii*bots_arg_size+kk] == NULL) continue;
         for (jj=kk+1; jj<bots_arg_size; jj++)
         {
            if (BENCH[kk*bots_arg_size+jj] == NULL) continue;
            if (BENCH[ii*bots_arg_size+jj] == NULL)
               BENCH[ii*bots_arg_size+jj] = allocate_clean_block();
            bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]);
         }
      }
   }
   double t_stop = omp_get_wtime();
   fprintf(stderr,"Serial Time: %f\n",t_stop-t_start);
}
/***********************************************************************
 * sparselu_fini: final stage -- dumps the block structure of BENCH to
 * stderr under the given label. Note: does not free the blocks.
 **********************************************************************/
void sparselu_fini (float **BENCH, char *pass)
{
print_structure(pass, BENCH);
}
/***********************************************************************
 * sparselu_check: verifies that the parallel result matches the
 * sequential reference, both in block structure and (via checkmat)
 * numerically. Calls abort() on the first mismatch; otherwise returns
 * BOTS_RESULT_SUCCESSFUL.
 **********************************************************************/
int sparselu_check(float **SEQ, float **BENCH)
{
   int ii, jj;
   int ok = 1;
   for (ii=0; (ii<bots_arg_size) && ok; ii++)
   {
      for (jj=0; (jj<bots_arg_size) && ok; jj++)
      {
         float *a = SEQ[ii*bots_arg_size+jj];
         float *b = BENCH[ii*bots_arg_size+jj];
         if ((a == NULL) != (b == NULL))
            ok = FALSE;              /* structural mismatch */
         else if (a != NULL)
            ok = checkmat(a, b);     /* numerical comparison */
         if(!ok)abort();
      }
   }
   if (ok) fprintf(stderr,"stämmer\n");
   return ok ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
/***********************************************************************
 * main: runs the task-parallel factorization, then the sequential
 * reference, and compares the two. Exit code 0 on agreement.
 **********************************************************************/
int main ( int argc, char *argv[])
{
   float **SEQ;
   float **BENCH;
   /* parallel run first ... */
   sparselu_init(&BENCH,"benchmark");
   sparselu_par_call(BENCH);
   sparselu_fini(BENCH,"benchmark");
   /* ... then the sequential reference */
   sparselu_init(&SEQ,"serial");
   sparselu_seq_call(SEQ);
   sparselu_fini(SEQ,"serial");
   fprintf(stderr,"Testar om Parallel och Seriell version stämmer med varandra...\n");
   int success = (sparselu_check(SEQ,BENCH) == BOTS_RESULT_SUCCESSFUL);
   return success ? 0 : 1;
}
|
nested_mixed.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true %libomp-run | %python %S/check.py -c 'CHECK' %s
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Exercises OMP_DISPLAY_AFFINITY across nested parallel regions.
 * The exact nesting structure below is the behavior under test: each
 * region must emit one affinity line per thread, matched in order by
 * the CHECK lines after this function. Do not restructure. */
int main(int argc, char** argv) {
/* One line per thread: nesting level (%L), affinity (%a), thread num (%n), team size (%N). */
omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N");
/* Enable nested parallelism (deprecated API; kept deliberately for this test). */
omp_set_nested(1);
/* level 1: serialized region (team of 1) */
#pragma omp parallel num_threads(1)
{
/* level 2: team of 2 */
#pragma omp parallel num_threads(2)
{ }
/* level 2 -> 3 -> 4: mixed team sizes */
#pragma omp parallel num_threads(2)
{
#pragma omp parallel num_threads(1)
{
#pragma omp parallel num_threads(2)
{ }
}
}
#pragma omp parallel num_threads(1)
{ }
}
/* top-level regions after the nested block */
#pragma omp parallel num_threads(2)
{ }
#pragma omp parallel num_threads(1)
{ }
return 0;
}
// CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
// CHECK: num_threads=2 TESTER: tl:2 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=1 TESTER: tl:3 at:[0-9] tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:3 at:[0-9] tn:0 nt:1
// CHECK: num_threads=2 TESTER: tl:4 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=2 TESTER: tl:4 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=1 TESTER: tl:2 at:[0-9] tn:0 nt:1
// CHECK: num_threads=2 TESTER: tl:1 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=1 TESTER: tl:1 at:[0-9] tn:0 nt:1
|
optimized_bct_types.h | #pragma once
#define EIGEN_NO_DEBUG
#define MKL_DIRECT_CALL_SEQ_JIT
//#define PROFILING
#define CACHE_LINE_WIDTH 64 // length of cache line measured in bytes
#define CACHE_LINE_LENGHT 8 // length of cache line measured in number of doubles
//#define restrict __restrict
#define restrict
#define ALIGN CACHE_LINE_WIDTH // MKL suggests 64-byte alignment
#define CHUNK_SIZE CACHE_LINE_LENGHT
#define RAGGED_SCHEDULE schedule( guided, CHUNK_SIZE)
#define SAFE_ALLOCATE_WARNINGS
//#define USE_NORMALS_ONLY // If defined, then CreateOptimizedBVH will create BVHs whose cluster data contain averaged normals. This may lead to incorrect terms in the far field of the lower order metric.
#include <mkl.h>
#include <tbb/cache_aligned_allocator.h>
#include <algorithm>
#include <omp.h>
#include <deque>
#include <vector>
#include <iterator>
#include <memory>
#include <unistd.h>
#include <string>
#include <chrono>
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>
#include "bct_kernel_type.h"
#include "profiler.h"
namespace rsurfaces
{
// "I know only two types: integers and doubles..."
typedef MKL_INT mint; // "machine integer" -- ensuring that we use the integer type requested by MKL. I find "MKL_INT" a bit clunky, though.
typedef double mreal; // "machine real"
// Static scratch storage for the tic()/toc() timing helpers below.
// Not thread-safe: tic()/toc() mutate the shared stack without locking.
class Timers
{
public:
// LIFO stack of tic() start times; the matching toc() pops the most recent.
static std::deque<std::chrono::time_point<std::chrono::steady_clock>> time_stack;
// NOTE(review): start_time/stop_time appear unused by the inline helpers
// in this header -- possibly legacy; confirm before removing.
static std::chrono::time_point<std::chrono::steady_clock> start_time;
static std::chrono::time_point<std::chrono::steady_clock> stop_time;
};
// Emit a plain message followed by a flushed newline.
// (Indentation by timer depth is intentionally disabled here.)
inline void print(std::string s)
{
    std::cout << s << '\n' << std::flush;
}
// Print "name = value" for any streamable value.
template <typename T>
inline void valprint(std::string s, T val)
{
    std::cout << s << " = " << val << '\n' << std::flush;
}
// Print "name = [ a, b, c ]" for the half-open range [begin, end).
// An empty or null range prints "name = [  ]".
template <typename T>
inline void valprint(std::string s, T * begin, T * end)
{
    std::cout << s << " = [ ";
    if( (begin != nullptr) && (end > begin) )
    {
        for( T * it = begin; it != end; ++it )
        {
            if( it != begin )
            {
                std::cout << ", ";
            }
            std::cout << *it;
        }
    }
    std::cout << " ]" << std::endl;
}
// template <typename T>
// inline void valprint(std::string s, mint * begin, mint * end, T * array)
// {
// std::cout << (std::string( 2 * (Timers::time_stack.size() + 1), ' ') + s) << " = [ ";
// if( end > begin )
// {
// for( T * ptr = begin; ptr < end-1 ; ++ptr)
// {
// std::cout << array[*ptr] << ", ";
// }
// std::cout << array[*end];
// }
// std::cout << " ]" << std::endl;
// }
inline void eprint(std::string s)
{
std::cout << (std::string( 2 * (Timers::time_stack.size() + 1), ' ') + "ERROR: " + s) << std::endl;
}
inline void wprint(std::string s)
{
std::cout << (std::string( 2 * (Timers::time_stack.size() + 1), ' ') + "WARNING: " + s) << std::endl;
}
// Start a labeled timing scope: push the current time and announce the label.
inline void tic(std::string s)
{
    Timers::time_stack.push_back( std::chrono::steady_clock::now() );
    const std::string indent( 2 * Timers::time_stack.size(), ' ' );
    std::cout << (indent + s + "...") << std::endl;
}
// Close the innermost tic() scope: print and return the elapsed seconds.
// An unmatched call prints a diagnostic (with the label) and returns 0.
inline mreal toc(std::string s)
{
    if (Timers::time_stack.empty())
    {
        std::cout << ("Unmatched toc detected. Label = " + s) << std::endl;
        return 0.;
    }
    const auto t0 = Timers::time_stack.back();
    const auto t1 = std::chrono::steady_clock::now();
    const mreal duration = std::chrono::duration<double>(t1 - t0).count();
    // indentation uses the depth *before* popping, matching the tic() line
    std::cout << (std::string( 2 * Timers::time_stack.size(), ' ') + std::to_string(duration) + " s.") << std::endl;
    Timers::time_stack.pop_back();
    return duration;
}
// Start an unlabeled timing scope (no output).
inline void tic()
{
    Timers::time_stack.push_back( std::chrono::steady_clock::now() );
}
// Close the innermost tic() scope and return the elapsed seconds (no output
// on success). An unmatched call prints a diagnostic and returns 0.
inline mreal toc()
{
    if (Timers::time_stack.empty())
    {
        std::cout << ("Unmatched toc detected.") << std::endl;
        return 0.;
    }
    const auto t0 = Timers::time_stack.back();
    const auto t1 = std::chrono::steady_clock::now();
    Timers::time_stack.pop_back();
    return std::chrono::duration<double>(t1 - t0).count();
}
// double allocation helpers
// Free an mkl_malloc'ed buffer and null the pointer.
// Returns 0 when something was actually freed, 1 when ptr was already null.
template <typename T>
int safe_free( T * & ptr )
{
    if( ptr == nullptr )
    {
        return 1;
    }
    mkl_free(ptr);
    ptr = nullptr;
    return 0;
}
// (Re)allocate `size` elements of T with ALIGN-byte alignment via mkl_malloc.
// A non-null incoming pointer is freed first (optionally with a warning) to
// avoid leaking. Returns 1 if a previous allocation was replaced, else 0.
// NOTE(review): the mkl_malloc result is not checked for nullptr; callers
// assume allocation always succeeds.
template <typename T>
int safe_alloc(T * & ptr, size_t size)
{
    const int wasallocated = (ptr != nullptr);
    if( wasallocated )
    {
#ifdef SAFE_ALLOCATE_WARNINGS
        wprint("safe_alloc: Pointer was not NULL. Calling safe_free to prevent memory leak.");
#endif
        safe_free(ptr);
    }
    ptr = static_cast<T *>( mkl_malloc( size * sizeof(T), ALIGN ) );
    return wasallocated;
}
// Allocate `size` elements (see safe_alloc above) and fill them all with `init`.
template <typename T>
int safe_alloc( T * & ptr, size_t size, T init)
{
    const int wasallocated = safe_alloc(ptr, size);
    #pragma omp simd aligned( ptr : ALIGN )
    for( size_t k = 0; k < size; ++k )
    {
        ptr[k] = init;
    }
    return wasallocated;
}
// Allocate `size` elements and fill them with the arithmetic sequence
// 0, step, 2*step, ... Returns safe_alloc's replaced-allocation flag.
//
// FIX: the original advanced the loop *index* by `step` (i += step) while
// storing ptr[i] = i, which for step != 1 left most entries uninitialized
// and ignored the step value entirely. Every element is now written; for
// the default step == 1 the result is unchanged.
template <typename T>
int safe_iota(T * & ptr, size_t size, T step = static_cast<T>(1) )
{
    int wasallocated = safe_alloc(ptr, size);
    #pragma omp simd aligned( ptr : ALIGN )
    for( size_t i = 0; i < size; ++i )
    {
        ptr[i] = static_cast<T>(i) * step;
    }
    return wasallocated;
}
// In-place inclusive prefix sum over the half-open range [begin, end);
// thin convenience wrapper around std::partial_sum.
template <typename T>
inline void partial_sum( T * begin, T * end)
{
std::partial_sum( begin, end, begin );
}
typedef Eigen::SparseMatrix<mreal, Eigen::RowMajor, mint> EigenMatrixCSR;
typedef Eigen::SparseMatrix<mreal> EigenMatrixCSC;
typedef Eigen::Matrix<mreal, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> EigenMatrixRM;
typedef Eigen::MatrixXd EigenMatrixCM;
// I am not that knowledgeable about allocators; tbb::cache_aligned_allocator seemed to work well for allocating thread-owned vectors. And I simply used it for the rest, too, because it should also provide good alignment for SIMD instructions (used in MKL routines). DO NOT USE A_Vector OR A_Deque FOR MANY SMALL ARRAYS. I typically allocate only very large arrays, so the extra memory consumption should not be an issue.
template <typename T>
using A_Vector = std::vector<T, tbb::cache_aligned_allocator<T>>;
template <typename T>
using A_Deque = std::deque<T, tbb::cache_aligned_allocator<T>>;
//template <typename T>
//using A_Vector = std::vector<T>; // about 10% more performance with cache-alined storage
//
//template <typename T>
//using A_Deque = std::deque<T>;
// Bookkeeping for an MKL PARDISO factorization: matrix size/type, the
// permutation and iparm control arrays (owned, mkl_malloc'ed), the opaque
// handle vector `pt` used internally by pardiso, and factorization state.
struct PardisoData
{
    mint n = 0;
    mint mtype = 11; /* Matrix type (11 = real nonsymmetric) */
    mint * restrict perm = nullptr; /* Permutation */
    mint * restrict iparm = nullptr; /* Integer parameter array for controlling pardiso */
    A_Vector<void*> pt; /* Pointer used internally by pardiso to store its data */
    bool symfactorized = false;
    bool numfactorized = false;
    PardisoData(){};
    ~PardisoData(){
        safe_free(perm);
        safe_free(iparm);
    };
    // Copy constructor: deep-copies perm/iparm.
    // NOTE(review): `pt` (pardiso's internal handle) is copied shallowly,
    // so copies share one pardiso instance -- presumably only unfactorized
    // objects are copied in practice; confirm before relying on this.
    PardisoData( PardisoData const & P )
    {
        n = P.n;
        mtype = P.mtype;
        pt = P.pt;
        symfactorized = P.symfactorized;
        numfactorized = P.numfactorized;
        if( P.perm )
        {
            const mint * const restrict ptr = P.perm;
            safe_alloc( perm, n );
            #pragma omp simd aligned( perm, ptr : ALIGN)
            for( mint i = 0; i < n; ++i )
            {
                perm[i] = ptr[i];
            }
        }
        if( P.iparm )
        {
            const mint * const restrict ptr = P.iparm;
            safe_alloc( iparm, 64);
            #pragma omp simd aligned( iparm, ptr : ALIGN)
            for( mint i = 0; i < 64; ++i )
            {
                iparm[i] = ptr[i];
            }
        }
    }
    // Move constructor.
    // FIX: the original "moved" the raw perm/iparm pointers without
    // clearing them in the source, so both objects ended up owning the
    // same buffers and the source's destructor freed them -> double free
    // (and dangling pointers here). The source is now left empty and
    // unfactorized.
    PardisoData( PardisoData && P )
    {
        n = P.n;
        mtype = P.mtype;
        pt = std::move(P.pt);
        symfactorized = P.symfactorized;
        numfactorized = P.numfactorized;
        perm = P.perm;
        iparm = P.iparm;
        P.perm = nullptr;
        P.iparm = nullptr;
        P.symfactorized = false;
        P.numfactorized = false;
    }
    // Unified copy/move assignment via copy-and-swap.
    PardisoData &operator=(PardisoData P)
    {
        P.swap(*this);
        return *this;
    }
private:
    void swap(PardisoData &P) throw()
    {
        std::swap(this->n, P.n);
        std::swap(this->mtype, P.mtype);
        std::swap(this->pt, P.pt);
        std::swap(this->symfactorized, P.symfactorized);
        std::swap(this->numfactorized, P.numfactorized);
        std::swap(this->perm, P.perm);
        std::swap(this->iparm, P.iparm);
    }
}; // PardisoData
struct MKLSparseMatrix //A container to hold generic sparse array data and to perform MKL matrix-matrix-multiplication routines
{
mint m = 0;
mint n = 0;
mint nnz = 0;
mint * restrict outer = nullptr;
mint * restrict inner = nullptr;
mreal * restrict values = nullptr;
PardisoData P;
matrix_descr descr;
// MKLSparseMatrix( SparseMatrix A ) : MKLSparseMatrix( A.rows(), A.cols(), A.outerIndexPtr(), A.innerIndexPtr(), A.valuePtr() ){}; //dunno why this does not work...
MKLSparseMatrix( const mint m_, const mint n_, const mint nnz_ )
{
// print("MKLSparseMatrix( const mint m_, const mint n_, const mint nnz_ )");
m = m_;
n = n_;
nnz = nnz_;
P.n = n;
safe_alloc( outer, m + 1 );
safe_alloc( inner, nnz );
safe_alloc( values, nnz );
outer[0] = 0;
outer[m_] = nnz;
descr.type = SPARSE_MATRIX_TYPE_GENERAL;
descr.diag = SPARSE_DIAG_NON_UNIT;
P.mtype = 11;
};
MKLSparseMatrix( const mint m_, const mint n_, mint * outer_, mint * inner_, mreal * values_ )
{
// print("MKLSparseMatrix( const mint m_, const mint n_, mint * outer_, mint * inner_, mreal * values_ )");
m = m_;
n = n_;
nnz = outer_[m];
P.n = n;
safe_alloc( outer, m + 1 );
safe_alloc( inner, nnz );
safe_alloc( values, nnz );
#pragma omp simd aligned( outer : ALIGN )
for( mint i = 0; i < m+1; ++i)
{
outer[i] = outer_[i];
}
#pragma omp simd aligned( inner, values : ALIGN )
for( mint i = 0; i < nnz; ++i)
{
inner[i] = inner_[i];
values[i] = values_[i];
}
descr.type = SPARSE_MATRIX_TYPE_GENERAL;
descr.diag = SPARSE_DIAG_NON_UNIT;
P.mtype = 11;
// vprint("m",m);
// vprint("n",n);
// vprint("nnz",nnz);
};
MKLSparseMatrix( const mint m_, const mint n_, mint * outer_B, mint * outer_E, mint * inner_, mreal * values_ )
{
// print("MKLSparseMatrix( const mint m_, const mint n_, mint * outer_B, mint * outer_E, mint * inner_, mreal * values_ )");
m = m_;
n = n_;
nnz = outer_B[m];
P.n = n;
if(outer_B[0])
{
eprint("in MKLSparseMatrix: outer_B[0] != 0.");
}
safe_alloc( outer, m + 1 );
safe_alloc( inner, nnz );
safe_alloc( values, nnz );
outer[0] = 0;
#pragma omp simd aligned( outer : ALIGN )
for( mint i = 0; i < m; ++i)
{
outer[i+1] = outer_E[i];
}
#pragma omp simd aligned( inner, values : ALIGN )
for( mint i = 0; i < nnz; ++i)
{
inner[i] = inner_[i];
values[i] = values_[i];
}
descr.type = SPARSE_MATRIX_TYPE_GENERAL;
descr.diag = SPARSE_DIAG_NON_UNIT;
P.mtype = 11;
};
// Copy constructor
MKLSparseMatrix( MKLSparseMatrix const & B )
{
// print("MKLSparseMatrix copy constructor");
m = B.m;
n = B.n;
nnz = B.nnz;
P = B.P;
if(B.outer[0])
{
eprint("in MKLSparseMatrix &operator=(MKLSparseMatrix const &B): B.outer[0] != 0.");
}
safe_alloc( outer, m + 1 );
safe_alloc( inner, nnz );
safe_alloc( values, nnz );
#pragma omp simd aligned( outer : ALIGN )
for( mint i = 0; i <= m; ++i)
{
outer[i] = B.outer[i];
}
#pragma omp simd aligned( inner, values : ALIGN )
for( mint i = 0; i < nnz; ++i)
{
inner[i] = B.inner[i];
values[i] = B.values[i];
}
descr.type = B.descr.type;
descr.diag = B.descr.diag;
}
// Move constructor
MKLSparseMatrix( MKLSparseMatrix && B )
{
// print("MKLSparseMatrix move constructor");
m = B.m;
n = B.n;
nnz = B.nnz;
P = std::move(B.P);
outer = std::move(B.outer);
inner = std::move(B.inner);
values = std::move(B.values);
descr = std::move(B.descr);
}
// copy-and-swap idiom
MKLSparseMatrix &operator=(MKLSparseMatrix B)
{
// print("MKLSparseMatrix copy-and-swap");
B.swap(*this);
return *this;
}
MKLSparseMatrix(){
descr.type = SPARSE_MATRIX_TYPE_GENERAL;
descr.diag = SPARSE_DIAG_NON_UNIT;
};
~MKLSparseMatrix(){
// PrintStats();
if( P.symfactorized || P.numfactorized )
{
mint phase = -1;
mreal ddum = 0.; /* double dummy pointer */
mint mnum = 1; /* Which factorization to use */
mint error = 0; /* Error flag */
mint nrhs = 1; /* Number of right hand sides */
mint msglvl = 0; /* Do not print statistical information to file */
mint maxfct = 1; /* Maximum number of numerical factorizations */
pardiso (P.pt.data(), &maxfct, &mnum, &P.mtype, &phase, &n, values, outer, inner, P.perm, &nrhs, P.iparm, &msglvl, &ddum, &ddum, &error);
}
safe_free( outer );
safe_free( inner );
safe_free( values );
};
void PrintStats()
{
print("##################################");
valprint("m", m);
valprint("n", n);
valprint("nnz", nnz);
valprint("P.n", P.n);
// std::cout << "descr = " << descr << std::endl;
if( outer )
{
valprint("outer[0]", outer[0]);
valprint("outer[m]", outer[m]);
}
if( inner )
{
valprint("inner[0]", inner[0]);
valprint("inner[nnz]", inner[nnz-1]);
}
if( values )
{
valprint("values[0]", values[0]);
valprint("values[nnz]", values[nnz-1]);
}
print("##################################");
}
void Check()
{
print(" ### MKLSparseMatrix::Check ### ");
valprint("m", m);
valprint("n", n);
valprint("nnz", nnz);
bool failed = false;
if( outer )
{
if( outer[0] )
{
eprint("outer[0] != 0");
valprint("outer[0]", outer[0]);
failed = true;
}
if( outer[m] != nnz )
{
eprint("outer[m] != nnz");
failed = true;
}
}
else
{
wprint("outer not initilized.");
}
if( inner )
{
valprint("inner[0]", inner[0]);
valprint("inner[nnz]", inner[nnz-1]);
}
else
{
wprint("inner not initilized.");
}
if( values )
{
valprint("values[0]", values[0]);
valprint("values[nnz]", values[nnz-1]);
}
else
{
wprint("values not initilized.");
}
if( outer && inner )
{
for( mint i = 0; i < m; ++ i)
{
if( failed )
{
break;
}
for( mint k = outer[i]; k< outer[i+1]; ++k )
{
mint j = inner[k];
if( j<0 || j >= n)
{
eprint("inner[" + std::to_string(k) + "] is out of bounds.");
valprint("i", i);
valprint("j = inner[k]", j);
failed = true;
break;
}
}
}
}
print(" ### MKLSparseMatrix::Check finished ### ");
}
// TODO: Better error handling
void Multiply( A_Vector<mreal> & input, A_Vector<mreal> & output, mint cols, bool addToResult = false )
{
Multiply( &input[0], &output[0], cols, addToResult );
}
void Multiply( mreal * input, mreal * output, mint cols, bool addToResult = false )
{
if( outer[m]>0 )
{
sparse_status_t stat;
sparse_matrix_t A = nullptr;
stat = mkl_sparse_d_create_csr ( &A, SPARSE_INDEX_BASE_ZERO, m, n, outer, outer + 1, inner, values );
if (stat)
{
eprint(" in MKLSparseMatrix::Multiply: mkl_sparse_d_create_csr returned " + std::to_string(stat) );
}
mreal factor = addToResult ? 1. : 0.;
if( cols > 1 )
{
// tic("MKL sparse matrix-matrix multiplication: cols = " + std::to_string(cols) );
stat = mkl_sparse_d_mm ( SPARSE_OPERATION_NON_TRANSPOSE, 1., A, descr, SPARSE_LAYOUT_ROW_MAJOR, input, cols, cols, factor, output, cols );
if (stat)
{
eprint("in MKLSparseMatrix::Multiply: mkl_sparse_d_mm returned " + std::to_string(stat) );
}
// toc("MKL sparse matrix-matrix multiplication: cols = " + std::to_string(cols) );
}
else
{
if( cols == 1)
{
// tic("MKL sparse matrix-vector multiplication" );
sparse_status_t stat = mkl_sparse_d_mv( SPARSE_OPERATION_NON_TRANSPOSE, 1., A, descr, input, factor, output );
if (stat)
{
eprint("in MKLSparseMatrix::Multiply: mkl_sparse_d_mv returned " + std::to_string(stat) );
}
// toc("MKL sparse matrix-vector multiplication" );
}
}
stat = mkl_sparse_destroy(A);
if (stat)
{
eprint("mkl_sparse_destroy returned stat = " + std::to_string(stat) );
}
}
else
{
wprint("MKLSparseMatrix::Multiply: No nonzeroes found. Doing nothing.");
}
}
void Multiply( MKLSparseMatrix & B, MKLSparseMatrix & C)
{
// print("void Multiply( MKLSparseMatrix & B, MKLSparseMatrix & C)");
sparse_status_t stat;
sparse_matrix_t csrA = nullptr;
sparse_matrix_t csrB = nullptr;
sparse_matrix_t csrC = nullptr;
stat = mkl_sparse_d_create_csr(&csrA, SPARSE_INDEX_BASE_ZERO, m, n, outer, outer + 1, inner, values );
if (stat)
{
eprint("in MKLSparseMatrix::Multiply: mkl_sparse_d_create_csr returned " + std::to_string(stat) );
}
stat = mkl_sparse_d_create_csr(&csrB, SPARSE_INDEX_BASE_ZERO, B.m, B.n, B.outer, B.outer +1 , B.inner, B.values );
if (stat)
{
eprint("in MKLSparseMatrix::Multiply: mkl_sparse_d_create_csr returned " + std::to_string(stat) );
}
stat = mkl_sparse_spmm(SPARSE_OPERATION_NON_TRANSPOSE, csrA, csrB, &csrC);
if (stat)
{
eprint("in MKLSparseMatrix::Multiply: mkl_sparse_spmm returned " + std::to_string(stat) );
}
mint rows_C;
mint cols_C;
mint * inner_C = nullptr;
mint * outerB_C = nullptr;
mint * outerE_C = nullptr;
mreal * values_C = nullptr;
sparse_index_base_t indexing = SPARSE_INDEX_BASE_ZERO;
stat = mkl_sparse_d_export_csr( csrC, &indexing, &rows_C, &cols_C, &outerB_C, &outerE_C, &inner_C, &values_C );
if (stat)
{
eprint("in MKLSparseMatrix::Multiply: mkl_sparse_d_export_csr returned " + std::to_string(stat) );
}
C = MKLSparseMatrix( rows_C, cols_C, outerB_C, outerE_C, inner_C, values_C ); // Copy!
mkl_sparse_destroy(csrA);
mkl_sparse_destroy(csrB);
mkl_sparse_destroy(csrC);
}
void Transpose( MKLSparseMatrix & AT)
{
MKLVersion Version;
mkl_get_version(&Version);
mint mkl_version = Version.MajorVersion;
// MKLVersion Version;
// mkl_get_version(&Version);
// printf("Major version: %d\n",Version.MajorVersion);
// printf("Minor version: %d\n",Version.MinorVersion);
// printf("Update version: %d\n",Version.UpdateVersion);
// printf("Product status: %s\n",Version.ProductStatus);
// printf("Build: %s\n",Version.Build);
// printf("Platform: %s\n",Version.Platform);
// printf("Processor optimization: %s\n",Version.Processor);
// printf("================================================================\n");
// printf("\n");
sparse_status_t stat;
sparse_matrix_t csrA = nullptr;
sparse_matrix_t csrAT = nullptr;
stat = mkl_sparse_d_create_csr(&csrA, SPARSE_INDEX_BASE_ZERO, m, n, outer, outer + 1, inner, values );
if (stat)
{
eprint("in MKLSparseMatrix::Transpose: mkl_sparse_d_create_csr returned " + std::to_string(stat) );
}
stat = mkl_sparse_convert_csr (csrA, SPARSE_OPERATION_TRANSPOSE, &csrAT);
if (stat)
{
eprint("in MKLSparseMatrix::Transpose: mkl_sparse_convert_csr returned " + std::to_string(stat) );
}
mint rows_AT;
mint cols_AT;
mint * inner_AT = nullptr;
mint * outerB_AT = nullptr;
mint * outerE_AT = nullptr;
mreal * values_AT = nullptr;
sparse_index_base_t indexing = SPARSE_INDEX_BASE_ZERO;
if( mkl_version >=2020)
{
stat = mkl_sparse_d_export_csr( csrAT, &indexing, &rows_AT, &cols_AT, &outerB_AT, &outerE_AT, &inner_AT, &values_AT );
}
else
{
// MKL 2019.0.1 requires this one for a weird reason
stat = mkl_sparse_d_export_csr( csrAT, &indexing, &cols_AT, &rows_AT, &outerB_AT, &outerE_AT, &inner_AT, &values_AT ); // It's not logical to swap rows_AT and cols_AT...
}
if (stat)
{
eprint("in MKLSparseMatrix::Transpose: mkl_sparse_d_export_csr returned " + std::to_string(stat) );
}
AT = MKLSparseMatrix( rows_AT, cols_AT, outerB_AT, outerE_AT, inner_AT, values_AT ); // Copy!
mkl_sparse_destroy(csrA);
mkl_sparse_destroy(csrAT);
}
mint FactorizeSymbolically()
{
if( m != n)
{
eprint("Matrix is not square, so it cannot be factorized symbolically.");
return 1;
}
else
{
P.pt = A_Vector<void*>(64);
safe_alloc( P.iparm, 64 );
safe_alloc( P.perm , m);
for ( mint i = 0; i < m; ++i )
{
P.perm[i] = i;
}
P.iparm[0] = 1; /* No solver default */
P.iparm[1] = 3; /* parallel version of nested disection */
P.iparm[3] = 0; /* No iterative-direct algorithm */
P.iparm[4] = 2; /* Write fill-in reducing permutation to perm */
P.iparm[5] = 0; /* Write solution into x */
if( P.mtype == 11 )
{
P.iparm[9] = 13; /* Perturb the pivot elements with 1E-iparm[9] */
}
else
{
P.iparm[9] = 8; /* Perturb the pivot elements with 1E-iparm[9] */
}
if( (P.mtype==2) || (P.mtype==-2) )
{
P.iparm[10] = 0; /* Disable scaling. Because it is slow.*/
P.iparm[12] = 0; /* Disable matching. Because it is slow.*/
}
else
{
P.iparm[10] = 1; /* Enable scaling. Default for nonsymmetric matrices. Good for indefinite symmetric matrices */
P.iparm[12] = 1; /* Enable matching. Default for nonsymmetric matrices. Good for indefinite symmetric matrices */
}
P.iparm[17] = -1; /* Report number of nonzeros in the factor LU */
P.iparm[18] = 0; /* Do not compute Mflops for LU factorization (because it is not for free) */
P.iparm[20] = 1; /* Bunch-Kaufman pivoting */
P.iparm[34] = 1; /* 0-based indexing */
mint phase = 11;
mreal ddum = 0.; /* double dummy pointer */
mint mnum = 1; /* Which factorization to use */
mint error = 0; /* Error flag */
mint nrhs = 1; /* Number of right hand sides */
mint msglvl = 0; /* Do not print statistical information to file */
mint maxfct = 1; /* Maximum number of numerical factorizations */
pardiso( P.pt.data(), &maxfct, &mnum, &P.mtype, &phase, &n, values, outer, inner, P.perm, &nrhs, P.iparm, &msglvl, &ddum, &ddum, &error );
if(error!=0)
{
P.symfactorized = false;
eprint("Pardiso reported an error in symbolic factorization: error = " + std::to_string(error) );
}
else
{
P.symfactorized = true;
}
return error;
}
} // FactorizeSymbolically
mint FactorizeNumerically()
{
if(!P.symfactorized)
{
FactorizeSymbolically();
}
mint phase = 22;
mreal ddum = 0.; /* double dummy pointer */
mint mnum = 1; /* Which factorization to use */
mint error = 0; /* Error flag */
mint nrhs = 1; /* Number of right hand sides */
mint msglvl = 0; /* Do not print statistical information to file */
mint maxfct = 1; /* Maximum number of numerical factorizations */
pardiso( P.pt.data(), &maxfct, &mnum, &P.mtype, &phase, &n, values, outer, inner, P.perm, &nrhs, P.iparm, &msglvl, &ddum, &ddum, &error );
if(error!=0)
{
P.numfactorized = false;
eprint("Pardiso reported an error in numeric factorization: error = " + std::to_string(error) );
}
else
{
P.numfactorized = true;
}
return error;
} // FactorizeNumerically
// Solves A * x = b (Pardiso phase 33) for a single right-hand side, running a
// numeric factorization first if none is available.
// b          : right-hand side vector (length n), not modified logically.
// x          : output solution vector (length n).
// transposed : if true, solve with the transposed system via iparm[11].
//              NOTE(review): MKL documents iparm[11] = 2 for the plain
//              transpose and 1 for the conjugate transpose; for real-valued
//              matrices these coincide — confirm if complex data is ever used.
// Returns the Pardiso error code (0 on success).
mint LinearSolve(mreal * b, mreal * x, bool transposed = false)
{
    // solves A * x = b
    if(!P.numfactorized)
    {
        FactorizeNumerically();
    }
    mint phase = 33;   /* Pardiso phase: solve + iterative refinement */
    P.iparm[11] = transposed ? 1 : 0;
    mint mnum = 1;     /* Which factorization to use */
    mint error = 0;    /* Error flag */
    mint nrhs = 1;     /* Number of right hand sides */
    mint msglvl = 0;   /* Do not print statistical information to file */
    mint maxfct = 1;   /* Maximum number of numerical factorizations */
    pardiso( P.pt.data(), &maxfct, &mnum, &P.mtype, &phase, &n, values, outer, inner, P.perm, &nrhs, P.iparm, &msglvl, b, x, &error );
    if(error!=0)
    {
        eprint("Pardiso reported an error in solving phase: error = " + std::to_string(error) );
    }
    // Reset the transpose flag so subsequent calls are unaffected.
    P.iparm[11] = 0;
    return error;
} // LinearSolve
// TODO: Currently untested whether multiple right hand sides are handled correctly here.
// Solves A * X = B (Pardiso phase 33) for `cols` right-hand sides at once,
// running a numeric factorization first if none is available.
// B          : right-hand sides, n x cols (column-major, as Pardiso expects).
// X          : output solutions, n x cols.
// cols       : number of right-hand sides.
// transposed : if true, solve with the transposed system via iparm[11]
//              (see the note in LinearSolve about the MKL flag values).
// Returns the Pardiso error code (0 on success).
mint LinearSolveMatrix(mreal * B, mreal * X, mint cols, bool transposed = false)
{
    // solves A * X = B
    if(!P.numfactorized)
    {
        FactorizeNumerically();
    }
    mint phase = 33;   /* Pardiso phase: solve + iterative refinement */
    P.iparm[11] = transposed ? 1 : 0;
    mint mnum = 1;     /* Which factorization to use */
    mint error = 0;    /* Error flag */
    mint nrhs = cols;  /* Number of right hand sides */
    mint msglvl = 0;   /* Do not print statistical information to file */
    mint maxfct = 1;   /* Maximum number of numerical factorizations */
    pardiso(P.pt.data(), &maxfct, &mnum, &P.mtype, &phase, &n, values, outer, inner, P.perm, &nrhs, P.iparm, &msglvl, B, X, &error );
    if(error!=0)
    {
        eprint("Pardiso reported an error in solving phase: error = " + std::to_string( error) );
    }
    // Reset the transpose flag so subsequent calls are unaffected.
    P.iparm[11] = 0;
    return error;
} // LinearSolveMatrix
private:
// Member-wise swap with B; the usual building block for copy-and-swap
// assignment. Only swaps scalars and pointers, so it cannot throw.
// NOTE: the dynamic exception specification `throw()` is deprecated since
// C++11 and removed in C++17; `noexcept` is the modern equivalent.
void swap(MKLSparseMatrix &B) noexcept
{
    std::swap(this->m, B.m);
    std::swap(this->n, B.n);
    std::swap(this->nnz, B.nnz);
    std::swap(this->outer, B.outer);
    std::swap(this->inner, B.inner);
    std::swap(this->values, B.values);
    std::swap(this->P, B.P);
    std::swap(this->descr, B.descr);
}
}; // MKLSparseMatrix
#pragma omp declare simd
// Fast power for strictly positive base: computes base^exponent as
// exp2(exponent * log2(base)). This skips all of std::pow's domain and
// special-case handling, which is what makes it SIMD-friendly.
// Undefined for base <= 0 (log2 of a non-positive value).
inline mreal mypow ( mreal base, mreal exponent )
{
    // Warning: Use only for positive base! This is basically pow with certain checks and cases deactivated
    return std::exp2( exponent * std::log2(base) );
} // mypow
// #pragma omp declare simd
// inline mreal intpow(mreal base, mint exponent)
// {
// mreal r = 1.;
// mreal x = base;
// mint k = abs(exponent);
//
// while( k > 0)
// {
// if( k % 2 )
// {
// r *= x;
// }
// x *= x;
// k /= 2;
// }
// return exponent >= 0 ? r : 1./r;
// } // intpow
//#pragma omp declare simd
//inline mreal intpow(const mreal base, const double exponent)
//{
// mreal r = 1.;
// mint last = std::round(std::floor(abs(exponent)));
// for( mint k = 0; k < last; ++k )
// {
// r *= base;
// }
// return exponent>=0. ? r : 1./r;
//} // intpow
#pragma omp declare simd
// Integer-exponent power: exponents 0..12 are expanded into explicit
// multiplication chains (SIMD-friendly, exact); larger exponents fall back to
// the floating-point overload mypow(mreal, mreal), which requires base > 0.
// Negative exponents return the reciprocal of the positive-exponent result.
// NOTE(review): exponent == INT_MIN would overflow in `-exponent`; callers are
// assumed to pass moderate exponents — confirm if extreme values can occur.
inline mreal mypow(mreal base, mint exponent)
{
    mreal b2, b3, b4, b6;
    if( exponent >= 0)
    {
        switch (exponent) {
            case 0: return 1.;
            case 1: return base;
            case 2: return base * base;
            case 3: return base * base * base;
            case 4:
                b2 = base * base;
                return b2 * b2;
            case 5:
                b2 = base * base;
                return b2 * b2 * base;
            case 6:
                b2 = base * base;
                return b2 * b2 * b2;
            case 7:
                b2 = base * base;
                b4 = b2 * b2;
                return b4 * b2 * base;
            case 8:
                b2 = base * base;
                b4 = b2 * b2;
                return b4 * b4;
            case 9:
                b2 = base * base;
                b4 = b2 * b2;
                return b4 * b4 * base;
            case 10:
                b2 = base * base;
                b4 = b2 * b2;
                return b4 * b4 * b2;
            case 11:
                b2 = base * base;
                b4 = b2 * b2;
                return b4 * b4 * b2 * base;
            case 12:
                b2 = base * base;
                b4 = b2 * b2;
                return b4 * b4 * b4;
            default:
                // BUG FIX: the original called mypow(base, exponent) with the
                // mint argument unchanged, which resolves to THIS overload and
                // recurses forever for exponent > 12. Cast to mreal so the
                // exp2/log2-based floating-point overload is selected instead.
                return mypow(base, (mreal)exponent);
        }
    }
    else
    {
        return 1./mypow(base, -exponent);
    }
} // mypow
#pragma omp declare simd
// SIMD-declarable wrapper around fmax. Note fmax's NaN semantics: if exactly
// one argument is NaN, the other (non-NaN) argument is returned — this differs
// from a plain ternary comparison.
inline mreal mymax(const mreal & a, const mreal & b)
{
    return fmax(a,b);
}
#pragma omp declare simd
// SIMD-declarable wrapper around fmin; same NaN semantics as mymax above
// (a single NaN argument is ignored, the non-NaN one is returned).
inline mreal mymin(const mreal & a, const mreal & b)
{
    return fmin(a,b);
}
// This function reads in a list job_acc_costs of accumulated costs, then allocates job_ptr as a vector of size thread_count + 1, and writes the work distribution to it.
// Assigns thread k the consecutive chunk of jobs job_ptr[k], ..., job_ptr[k+1]-1.
// Uses a binary search to find the chunk boundaries.
// The cost of the i-th job is job_acc_costs[i+1] - job_acc_costs[i].
// The work of the k-th thread goes from job no. job_ptr[k] to job no. job_ptr[k+1] (as always in C/C++, job_ptr[k+1] points _after_ the last job).
void BalanceWorkLoad( mint job_count, mint * job_acc_costs, mint thread_count, mint * & job_ptr );
// Selects the parallelization strategy used when percolating data through the
// cluster tree. The concrete semantics of each strategy are defined at the
// usage sites (outside this view).
enum class TreePercolationAlgorithm
{
    Tasks,
    Sequential,
    Chunks
};
// Selects the backend used for the near-field sparse matrix multiplication
// (MKL CSR routines, a hybrid scheme, Eigen, or variable-block sparse rows).
// The dispatch on these values happens at the usage sites (outside this view).
enum class NearFieldMultiplicationAlgorithm
{
    MKL_CSR,
    Hybrid,
    Eigen,
    VBSR
};
} // namespace rsurfaces
|
omp_task_firstprivate.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/*
 * Verifies OpenMP `task firstprivate` semantics: each task must receive its
 * own private copy of `sum`, initialized from the shared value 1234 at task
 * creation. Every task then adds 0..LOOPCOUNT to its copy and checks the
 * total. Returns 1 if all NUM_TASKS tasks computed the expected sum, else 0.
 */
int test_omp_task_firstprivate()
{
    int t;
    int sum = 1234;
    /* expected: the firstprivate seed plus the arithmetic series 0..LOOPCOUNT */
    int known_sum = 1234 + (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    int result = 0; /* counts the wrong sums from tasks */

    #pragma omp parallel
    {
        #pragma omp single
        {
            for (t = 0; t < NUM_TASKS; t++) {
                #pragma omp task firstprivate(sum)
                {
                    int j;
                    for (j = 0; j <= LOOPCOUNT; j++) {
                        #pragma omp flush
                        sum += j;
                    }
                    /* each task's private copy must independently reach known_sum */
                    if (sum != known_sum) {
                        #pragma omp critical
                        { result++; }
                    }
                } /* omp task */
            }
        } /* omp single */
    } /* omp parallel */

    return (result == 0);
}
/*
 * Runs the firstprivate task test REPETITIONS times.
 * Exit status is the number of failed repetitions (0 == all passed).
 */
int main()
{
    int num_failed = 0;
    for (int rep = 0; rep < REPETITIONS; rep++) {
        num_failed += !test_omp_task_firstprivate();
    }
    return num_failed;
}
|
GB_unaryop__identity_bool_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_int64
// op(A') function: GB_tran__identity_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the identity unary operator elementwise with a typecast:
// Cx [p] = (bool) Ax [p] for p = 0..anz-1 (see the GB_CAST_OP macro above),
// statically parallelized over nthreads OpenMP threads.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// otherwise GrB_SUCCESS.
GrB_Info GB_unop__identity_bool_int64
(
    bool *restrict Cx,          // output array, anz entries
    const int64_t *restrict Ax, // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transposes A, casting int64_t entries to bool.
// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// which is instantiated here (phase 2 of 2) with the GB_* macros defined at
// the top of this file.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// otherwise GrB_SUCCESS.
GrB_Info GB_tran__identity_bool_int64
(
    GrB_Matrix C,                       // output matrix (already allocated)
    const GrB_Matrix A,                 // input matrix to transpose
    int64_t **Rowcounts,                // per-slice row counts from phase 1
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *restrict A_slice,    // slice boundaries of A
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_sgemm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Computes top_blob = kernel * bottom_im2col (+ bias) as a tiled SGEMM over
// the im2col matrix (MIPS MSA optimized, with a scalar fallback).
// bottom_im2col : size x maxk x inch, produced by convolution_im2col_sgemm_msa.
// kernel        : interleaved weights from
//                 convolution_im2col_sgemm_transform_kernel_msa (layouts must
//                 match the channel indexing used below).
// Strategy: repack the input into 4-column tiles ("tmp"), then produce output
// channels in blocks of 8 and 4 (MSA) or 2 (scalar), with scalar tail loops.
static void im2col_sgemm_msa(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // number of output positions (outw * outh)
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const float* bias = _bias;          // may be null/empty; zeros are used below

    // permute
    // Repack columns into tiles of 4 positions so the GEMM kernels can load 4
    // consecutive values per (inch, maxk) step. tmp has one channel per 4-wide
    // tile plus one channel per leftover position (size/4 + size%4 channels);
    // hence the tmp.channel(i / 4 + i % 4) indexing used throughout.
    Mat tmp;
    if (size >= 4)
        tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u, 1, opt.workspace_allocator);
    else
        tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator);
    {
        int nn_size = size / 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            float* tmpptr = tmp.channel(i / 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    // copy 4 adjacent output positions for this (q, k)
#if __mips_msa
                    __msa_st_w(__msa_ld_w(img0, 0), tmpptr, 0);
#else
                    tmpptr[0] = img0[0];
                    tmpptr[1] = img0[1];
                    tmpptr[2] = img0[2];
                    tmpptr[3] = img0[3];
#endif
                    img0 += size;   // next row of the im2col matrix
                    tmpptr += 4;
                }
            }
        }

        int remain_size_start = nn_size * 4;

        // leftover positions (size % 4): one position per tmp channel
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 4 + i % 4);

            for (int q = 0; q < inch; q++)
            {
                const float* img0 = (const float*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;
                    tmpptr += 1;
                }
            }
        }
    }

#if __mips_msa
    // ---- MSA path: 8 output channels at a time, then 4, then the tail ----
    int nn_outch = outch >> 3;
    int remain_outch_start = nn_outch << 3;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);
        float* outptr4 = top_blob.channel(p + 4);
        float* outptr5 = top_blob.channel(p + 5);
        float* outptr6 = top_blob.channel(p + 6);
        float* outptr7 = top_blob.channel(p + 7);

        const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // 8 output channels x 4 positions per iteration
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            const float* kptr = kernel.channel(p / 8);  // 8-interleaved weights

            int nn = inch * maxk; // inch always > 0

            // accumulators: one vector (4 positions) per output channel
            v4f32 _sum0 = __msa_fill_w_f32(biasptr[0]);
            v4f32 _sum1 = __msa_fill_w_f32(biasptr[1]);
            v4f32 _sum2 = __msa_fill_w_f32(biasptr[2]);
            v4f32 _sum3 = __msa_fill_w_f32(biasptr[3]);
            v4f32 _sum4 = __msa_fill_w_f32(biasptr[4]);
            v4f32 _sum5 = __msa_fill_w_f32(biasptr[5]);
            v4f32 _sum6 = __msa_fill_w_f32(biasptr[6]);
            v4f32 _sum7 = __msa_fill_w_f32(biasptr[7]);

            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr + 32);
                v4f32 _val = (v4f32)__msa_ld_w(tmpptr, 0);     // 4 input positions
                v4i32 _w0123 = __msa_ld_w(kptr, 0);            // weights, channels p..p+3
                v4i32 _w4567 = __msa_ld_w(kptr + 4, 0);        // weights, channels p+4..p+7
                _sum0 = __msa_fmadd_w(_sum0, _val, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val, (v4f32)__msa_splati_w(_w0123, 3));
                _sum4 = __msa_fmadd_w(_sum4, _val, (v4f32)__msa_splati_w(_w4567, 0));
                _sum5 = __msa_fmadd_w(_sum5, _val, (v4f32)__msa_splati_w(_w4567, 1));
                _sum6 = __msa_fmadd_w(_sum6, _val, (v4f32)__msa_splati_w(_w4567, 2));
                _sum7 = __msa_fmadd_w(_sum7, _val, (v4f32)__msa_splati_w(_w4567, 3));

                tmpptr += 4;
                kptr += 8;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);
            __msa_st_w((v4i32)_sum4, outptr4, 0);
            __msa_st_w((v4i32)_sum5, outptr5, 0);
            __msa_st_w((v4i32)_sum6, outptr6, 0);
            __msa_st_w((v4i32)_sum7, outptr7, 0);

            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
            outptr4 += 4;
            outptr5 += 4;
            outptr6 += 4;
            outptr7 += 4;
        }
        // scalar tail: 8 channels x 1 position
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
            const float* kptr = kernel.channel(p / 8);

            int nn = inch * maxk; // inch always > 0

            float sum0 = biasptr[0];
            float sum1 = biasptr[1];
            float sum2 = biasptr[2];
            float sum3 = biasptr[3];
            float sum4 = biasptr[4];
            float sum5 = biasptr[5];
            float sum6 = biasptr[6];
            float sum7 = biasptr[7];

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];
                sum4 += tmpptr[0] * kptr[4];
                sum5 += tmpptr[0] * kptr[5];
                sum6 += tmpptr[0] * kptr[6];
                sum7 += tmpptr[0] * kptr[7];
                tmpptr++;
                kptr += 8;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;
            outptr4[0] = sum4;
            outptr5[0] = sum5;
            outptr6[0] = sum6;
            outptr7[0] = sum7;

            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
            outptr4++;
            outptr5++;
            outptr6++;
            outptr7++;
        }
    }

    // remaining channels in groups of 4
    nn_outch = (outch - remain_outch_start) >> 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);
        float* outptr2 = top_blob.channel(p + 2);
        float* outptr3 = top_blob.channel(p + 3);

        const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // 4 output channels x 4 positions per iteration
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            // kernel channel index: past the 8-blocks, into the 4-blocks
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);

            int nn = inch * maxk; // inch always > 0

            v4f32 _sum0 = __msa_fill_w_f32(biasptr[0]);
            v4f32 _sum1 = __msa_fill_w_f32(biasptr[1]);
            v4f32 _sum2 = __msa_fill_w_f32(biasptr[2]);
            v4f32 _sum3 = __msa_fill_w_f32(biasptr[3]);

            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr + 16);
                v4f32 _val = (v4f32)__msa_ld_w(tmpptr, 0);
                v4i32 _w0123 = __msa_ld_w(kptr, 0);
                _sum0 = __msa_fmadd_w(_sum0, _val, (v4f32)__msa_splati_w(_w0123, 0));
                _sum1 = __msa_fmadd_w(_sum1, _val, (v4f32)__msa_splati_w(_w0123, 1));
                _sum2 = __msa_fmadd_w(_sum2, _val, (v4f32)__msa_splati_w(_w0123, 2));
                _sum3 = __msa_fmadd_w(_sum3, _val, (v4f32)__msa_splati_w(_w0123, 3));

                tmpptr += 4;
                kptr += 4;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            __msa_st_w((v4i32)_sum1, outptr1, 0);
            __msa_st_w((v4i32)_sum2, outptr2, 0);
            __msa_st_w((v4i32)_sum3, outptr3, 0);

            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }
        // scalar tail: 4 channels x 1 position
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4);

            int nn = inch * maxk; // inch always > 0

            float sum0 = biasptr[0];
            float sum1 = biasptr[1];
            float sum2 = biasptr[2];
            float sum3 = biasptr[3];

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];
                tmpptr++;
                kptr += 4;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;
            outptr2[0] = sum2;
            outptr3[0] = sum3;

            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
        }
    }

    remain_outch_start += nn_outch << 2;
#else // __mips_msa
    // ---- scalar path: 2 output channels at a time, then the tail ----
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        float* outptr0 = top_blob.channel(p);
        float* outptr1 = top_blob.channel(p + 1);

        const float zeros[2] = {0.f, 0.f};
        const float* biasptr = bias ? bias + p : zeros;

        int i = 0;
        // 2 output channels x 4 positions per iteration
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            const float* kptr = kernel.channel(p / 2);  // 2-interleaved weights

            int nn = inch * maxk; // inch always > 0

            float sum00 = biasptr[0];
            float sum01 = biasptr[0];
            float sum02 = biasptr[0];
            float sum03 = biasptr[0];
            float sum10 = biasptr[1];
            float sum11 = biasptr[1];
            float sum12 = biasptr[1];
            float sum13 = biasptr[1];

            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr + 8);
                float k0 = kptr[0];
                float k1 = kptr[1];
                sum00 += tmpptr[0] * k0;
                sum01 += tmpptr[1] * k0;
                sum02 += tmpptr[2] * k0;
                sum03 += tmpptr[3] * k0;
                sum10 += tmpptr[0] * k1;
                sum11 += tmpptr[1] * k1;
                sum12 += tmpptr[2] * k1;
                sum13 += tmpptr[3] * k1;

                tmpptr += 4;
                kptr += 2;
            }

            outptr0[0] = sum00;
            outptr0[1] = sum01;
            outptr0[2] = sum02;
            outptr0[3] = sum03;
            outptr1[0] = sum10;
            outptr1[1] = sum11;
            outptr1[2] = sum12;
            outptr1[3] = sum13;

            outptr0 += 4;
            outptr1 += 4;
        }
        // scalar tail: 2 channels x 1 position
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
            const float* kptr = kernel.channel(p / 2);

            int nn = inch * maxk; // inch always > 0

            float sum0 = biasptr[0];
            float sum1 = biasptr[1];

            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 4);
                __builtin_prefetch(kptr + 8);
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                tmpptr++;
                kptr += 2;
            }

            outptr0[0] = sum0;
            outptr1[0] = sum1;

            outptr0++;
            outptr1++;
        }
    }
#endif // __mips_msa

    // ---- tail: one output channel at a time (both paths) ----
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        int i = 0;
        for (; i + 3 < size; i += 4)
        {
            const float* tmpptr = tmp.channel(i / 4);
            // kernel channel index must mirror the interleave layout written by
            // convolution_im2col_sgemm_transform_kernel_msa
#if __mips_msa
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
            const float* kptr = kernel.channel(p / 2 + p % 2);
#endif

            int nn = inch * maxk; // inch always > 0

#if __mips_msa
            v4f32 _sum0 = __msa_fill_w_f32(bias0);

            for (int q = 0; q < nn; q++)
            {
                _sum0 = __msa_fmadd_w(_sum0, __msa_fill_w_f32(kptr[0]), (v4f32)__msa_ld_w(tmpptr, 0));
                tmpptr += 4;
                kptr++;
            }

            __msa_st_w((v4i32)_sum0, outptr0, 0);
            outptr0 += 4;
#else
            float sum0 = bias0;
            float sum1 = bias0;
            float sum2 = bias0;
            float sum3 = bias0;

            for (int q = 0; q < nn; q++)
            {
                __builtin_prefetch(tmpptr + 16);
                __builtin_prefetch(kptr + 4);
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];
                tmpptr += 4;
                kptr++;
            }

            outptr0[0] = sum0;
            outptr0[1] = sum1;
            outptr0[2] = sum2;
            outptr0[3] = sum3;
            outptr0 += 4;
#endif // __mips_msa
        }
        for (; i < size; i++)
        {
            const float* tmpptr = tmp.channel(i / 4 + i % 4);
#if __mips_msa
            const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
            const float* kptr = kernel.channel(p / 2 + p % 2);
#endif

            int nn = inch * maxk; // inch always > 0

            float sum0 = bias0;

            for (int q = 0; q < nn; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }

            outptr0[0] = sum0;
            outptr0++;
        }
    }
}
// Repacks convolution weights into the interleaved layout consumed by
// im2col_sgemm_msa: output channels are grouped 8/4/1 (MSA) or 2/1 (scalar),
// and within each group the weights of the grouped channels are interleaved
// per (inch, maxk) position. The channel indexing here defines the
// kernel.channel(...) formulas used in im2col_sgemm_msa — keep them in sync.
static void convolution_im2col_sgemm_transform_kernel_msa(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8b-maxk-inch-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __mips_msa
    // one channel per 8-group, then per 4-group, then per leftover channel
    kernel_tm.create(8 * maxk, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    kernel_tm.create(2 * maxk, inch, outch / 2 + outch % 2);
#endif

    int q = 0;
#if __mips_msa
    // groups of 8 output channels, weights interleaved 8-wide
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);
        const Mat k4 = kernel.channel(q + 4);
        const Mat k5 = kernel.channel(q + 5);
        const Mat k6 = kernel.channel(q + 6);
        const Mat k7 = kernel.channel(q + 7);

        float* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);
            const float* k40 = k4.row(p);
            const float* k50 = k5.row(p);
            const float* k60 = k6.row(p);
            const float* k70 = k7.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];

                g00 += 8;
            }
        }
    }
    // groups of 4 output channels, weights interleaved 4-wide
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);
        const Mat k2 = kernel.channel(q + 2);
        const Mat k3 = kernel.channel(q + 3);

        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00 += 4;
            }
        }
    }
#else
    // groups of 2 output channels, weights interleaved 2-wide
    for (; q + 1 < outch; q += 2)
    {
        const Mat k0 = kernel.channel(q);
        const Mat k1 = kernel.channel(q + 1);

        float* g00 = kernel_tm.channel(q / 2);

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];
                g00[1] = k10[k];

                g00 += 2;
            }
        }
    }
#endif // __mips_msa
    // leftover channels, one at a time, weights stored flat
    for (; q < outch; q++)
    {
        const Mat k0 = kernel.channel(q);

#if __mips_msa
        float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
        float* g00 = kernel_tm.channel(q / 2 + q % 2);
#endif

        for (int p = 0; p < inch; p++)
        {
            const float* k00 = k0.row(p);

            for (int k = 0; k < maxk; k++)
            {
                g00[0] = k00[k];

                g00 += 1;
            }
        }
    }
}
// Convolution as im2col + SGEMM: unrolls the input into a size x maxk x inch
// matrix (one row per kernel tap, one column per output position), then calls
// im2col_sgemm_msa to multiply by the pre-interleaved kernel.
// The write order here (positions fastest, then taps, then channels) must
// match the read order assumed by im2col_sgemm_msa's repack stage.
static void convolution_im2col_sgemm_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;       // number of output positions
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator);
    {
        // bytes to skip from the end of one output row to the start of the next
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            // one im2col row per kernel tap (u, v)
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <signal.h>
#include <hdf5.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "allvars.h"
#include "proto.h"
#ifndef ALPHA
#define ALPHA (2.0/3)
#endif
#define WEIGHT_FAK (3.0)
MyIDType *IdSnapTable;
MyIDType *tmpptr;
int *int_tmpptr;
long int *long_int_tmpptr;
long long int *long_long_int_tmpptr;
void get_TotNumPart(void);
/* Entry point of the merger-tree descendant finder.
 * Usage: L-BaseTree <parameterfile> <outputnum>
 * Loads the (sub)halo catalogues of up to three consecutive snapshots
 * (A, B, C), links particles across snapshots, determines primary and
 * secondary descendants, and writes the descendant list.
 * Returns 0 on success; exits nonzero on bad usage. */
int main(int argc, char **argv)
{
  if(argc != 3)
    {
      printf("\n usage: L-BaseTree <parameterfile> <outputnum>\n");
      printf(" <parameterfile> see readparmeterfile.c\n");
      printf(" <outputnum> snapshot number\n\n");
      exit(1);
    }

  read_parameter_file(argv[1]);

#ifdef SKIP_SNAP
  read_outputlist(OutputList);
#endif

  SnapshotNum = atoi(argv[2]);

  /* the three snapshots to link: A -> B (primary), A -> C (secondary) */
  int snapA = SnapshotNum + 0 * SnapSkipFac;
  int snapB = SnapshotNum + 1 * SnapSkipFac;
  int snapC = SnapshotNum + 2 * SnapSkipFac;

#ifdef SKIP_SNAP
  if(OutputListFlag[snapA] == 0)
    {
      /* BUG FIX: message previously lacked a trailing newline before exit */
      printf("broken snapshot, stopping\n");
      exit(0);
    }
  /* advance past broken snapshots; BUG FIX: test the bound BEFORE indexing
   * OutputListFlag so we never read past the end of the list */
  while(snapB < LastSnapShotNr && OutputListFlag[snapB] == 0)
    {
      snapB += SnapSkipFac;
    }
  snapC = snapB + SnapSkipFac;
  while(snapC < LastSnapShotNr && OutputListFlag[snapC] == 0)
    {
      snapC += SnapSkipFac;
    }
  printf("Used snapshots: SnapA %d, SnapB %d, SnapC %d\n", snapA, snapB, snapC);
#endif

#if defined(_OPENMP)
  printf("OMP: max-threads=%d\n", omp_get_max_threads());
  fflush(stdout);
#endif

  /*==========================================================================*/
  printf("allocating group catalogues...\n"); fflush(stdout);
  allocate_group_catalogue(snapA, &CatA);
  allocate_group_catalogue(snapB, &CatB);
  if(snapC <= LastSnapShotNr)
    allocate_group_catalogue(snapC, &CatC);
  printf("populating group catalogues...\n"); fflush(stdout);
  load_group_catalogue(snapA, &CatA);
  load_group_catalogue(snapB, &CatB);
  if(snapC <= LastSnapShotNr)
    load_group_catalogue(snapC, &CatC);
  /*==========================================================================*/

  /*==========================================================================*/
  printf("allocating subhalo catalogues...\n"); fflush(stdout);
  allocate_subhalo_catalogue(snapA, &CatA);
  allocate_subhalo_catalogue(snapB, &CatB);
  if(snapC <= LastSnapShotNr)
    allocate_subhalo_catalogue(snapC, &CatC);
  printf("populating subhalo catalogues...\n"); fflush(stdout);
  load_subhalo_catalogue(snapA, &CatA);
  load_subhalo_catalogue(snapB, &CatB);
  if(snapC <= LastSnapShotNr)
    load_subhalo_catalogue(snapC, &CatC);
  /*==========================================================================*/

  /*==========================================================================*/
  printf("reading/sorting IDs...\n"); fflush(stdout);
#ifdef IDS_HAVE_GAPS
  get_id_translation_table(); /* Load IdSnapTable: sorted array of length N_dm with minimum (first) value of Min(ID_dm) and a maximum (last) value of Max(ID_dm) */
#else
  get_TotNumPart();
#endif
  printf("reassigning ids ...\n"); fflush(stdout);
  reassign_ids(CatA.TotNids, CatA.IdList);
  reassign_ids(CatB.TotNids, CatB.IdList);
  /* BUG FIX: CatC is only allocated/loaded when snapC <= LastSnapShotNr, so
   * guard its id reassignment with the same condition (the original called
   * reassign_ids unconditionally on the never-populated catalogue) */
  if(snapC <= LastSnapShotNr)
    reassign_ids(CatC.TotNids, CatC.IdList);
  myfree(IdSnapTable);
  printf("done.\n"); fflush(stdout);
  /*==========================================================================*/

  /*==========================================================================*/
  /* set cat->IdToHalo[i] such that each particle can reference the Halo that it is a part of */
  printf("preparing ID-to-halo tables...\n"); fflush(stdout);
  prepare_index_list(&CatA);
  printf("index A done.\n"); fflush(stdout);
  prepare_index_list(&CatB);
  printf("index B done.\n"); fflush(stdout);
  if(snapC <= LastSnapShotNr)
    {
      prepare_index_list(&CatC);
      printf("index C done.\n"); fflush(stdout);
    }
  /*==========================================================================*/

  /*==========================================================================*/
  /* get descendants */
  printf("determine_descendants...\n"); fflush(stdout);
  determine_descendants(&CatA, &CatB, 0, snapB);
  printf("desc AB done.\n"); fflush(stdout);
#ifdef BACKWARD_CHECKING
  determine_descendants(&CatB, &CatA, 1, snapA);
  printf("desc BA done.\n");
  fflush(stdout);
#endif
  if(snapC <= LastSnapShotNr)
    {
      determine_descendants(&CatB, &CatC, 0, snapC);
      printf("desc BC done.\n"); fflush(stdout);
      determine_descendants(&CatA, &CatC, 1, snapC); /* secondary descendant */
      printf("desc AC done.\n"); fflush(stdout);
    }
  printf("descendants done.\n"); fflush(stdout);
  /*==========================================================================*/

  /*==========================================================================*/
  if(snapC <= LastSnapShotNr)
    {
      printf("decide whether we should take secondary descendant...\n");
      fflush(stdout);
      count_progenitors(&CatA, &CatB);
      printf("progcount AB done\n");
      fflush(stdout);
      count_progenitors(&CatB, &CatC);
      printf("progcount BC done\n");
      fflush(stdout);
      decide_upon_descendant();
      printf("decision made\n");
      fflush(stdout);
    }
#ifdef BACKWARD_CHECKING
  printf("Doing Backward decision ...\n");
  fflush(stdout);
  if(snapC > LastSnapShotNr)
    {
      count_progenitors(&CatA, &CatB);
      printf("progcount AB done\n");
      fflush(stdout);
    }
  if(snapC <= LastSnapShotNr)
    {
      decide_backwards(&CatA, &CatB);
      printf("Backward decision for AB done.\n");
      fflush(stdout);
    }
#endif
  /*==========================================================================*/
  printf("saving descendants...\n"); fflush(stdout);
  save_decendant_list();
  printf("saving done.\n"); fflush(stdout);
  /*==========================================================================*/
  return 0;
}
#ifdef BACKWARD_CHECKING
#ifndef HALO_SIZE_INCREASE_FOR_SWITCHING
#define HALO_SIZE_INCREASE_FOR_SWITCHING 1.5
#endif
/* Backward check: for subhalos in catB that ended up with no progenitor but do
 * have a secondary (backward) link into catA, redirect the catA progenitor's
 * descendant pointer to them when (a) the catB subhalo's most-bound particle
 * really belongs to that catA progenitor, (b) both candidate descendants merge
 * at the next snapshot anyway, and (c) the missed descendant is sufficiently
 * larger (factor HALO_SIZE_INCREASE_FOR_SWITCHING). */
void decide_backwards(struct halo_catalogue *catA, struct halo_catalogue *catB)
{
  int i, ic = 0, ict = 0, ptA, p, ifound;

  for(i = 0; i < catB->TotNsubhalos; i++)
    {
      if(catB->CountProgenitors[i] == 0) /* select halos with no progenitors */
        {
          ict++;
          if(catB->Descendant[i].HaloIndex[1] >= 0) /* But in reality they have one */
            {
              ptA = catB->Descendant[i].HaloIndex[1];
              /* check if the halo without progenitor's most bound
                 id is part of the descendant found at the previous snap.
                 Only in this case, continue ... */
              ifound = 0;
              for(p = 0; p < catA->SubLen[ptA]; p++)
                if(catB->IdList[catB->SubOffset[i]] == catA->IdList[catA->SubOffset[ptA] + p])
                  ifound++;
              if(ifound)
                {
                  /* now check if the two descendants found merge in the next step ... */
                  if(catB->Descendant[i].HaloIndex[0] ==
                     catB->Descendant[catA->Descendant[ptA].HaloIndex[0]].HaloIndex[0])
                    {
                      /* only redirect if missed descendant has more particles ... */
                      if(catB->SubLen[i] >
                         catB->SubLen[catA->Descendant[ptA].HaloIndex[0]] * HALO_SIZE_INCREASE_FOR_SWITCHING)
                        {
                          catA->Descendant[ptA].HaloIndex[0] = i;
                          ic++;
                        }
                    }
                }
            }
        }
    }
  printf("Redirected %d of %d descendents ...\n", ic, ict);
  fflush(stdout);
}
#endif
/* Reroutes descendants of CatA subhalos from the primary candidate (index 0,
 * in CatB) to the secondary candidate (index 1, in CatC) when the primary is
 * contested (several progenitors) and the secondary is unclaimed, or (with
 * SKIP_BY_WEIGHT) when the secondary's weight dominates. Also falls back to
 * the secondary if no primary exists. Prints summary statistics.
 * NOTE(review): the decrements/increments of CatB/CatC.CountProgenitors inside
 * the omp-parallel loop are not atomic and can race when subhalos share a
 * descendant — confirm whether this is tolerated as approximate bookkeeping.
 * NOTE(review): the final average divides by count_n, which can be zero
 * (yields nan/inf in the printout). */
void decide_upon_descendant(void)
{
  int i, index_b, index_c;
  int count_b, count_c, count_w, count_n;
  double sumpart;
#ifdef SKIP_BY_WEIGHT
  int count_s = 0;
#endif
  count_b = count_c = count_w = count_n = 0;
  sumpart = 0.0;
#if defined(_OPENMP)
#pragma omp parallel for private(index_b, index_c) reduction(+:count_b,count_c,count_w,count_n,sumpart)
#endif
  for(i = 0; i < CatA.TotNsubhalos; i++)
    {
      index_b = CatA.Descendant[i].HaloIndex[0];   /* primary: in CatB */
      index_c = CatA.Descendant[i].HaloIndex[1];   /* secondary: in CatC */
      if(index_b >= 0)
        count_b++;
      if(index_b >= 0 && index_c >= 0)
        {
          /* primary contested, secondary unclaimed: take the secondary */
          if(CatB.CountProgenitors[index_b] > 1 && CatC.CountProgenitors[index_c] == 0)
            {
              CatB.CountProgenitors[index_b]--;
              CatC.CountProgenitors[index_c]++;
              CatA.Descendant[i].HaloIndex[0] = CatA.Descendant[i].HaloIndex[1];
              CatA.Descendant[i].SnapNum[0] = CatA.Descendant[i].SnapNum[1];
              count_c++;
            }
#ifdef SKIP_BY_WEIGHT
          else
            {
              /* secondary's weight dominates by more than WEIGHT_FAK */
              if(CatA.Descendant[i].Weight[1] / WEIGHT_FAK > CatA.Descendant[i].Weight[0])
                {
                  CatB.CountProgenitors[index_b]--;
                  CatC.CountProgenitors[index_c]++;
                  CatA.Descendant[i].HaloIndex[0] = CatA.Descendant[i].HaloIndex[1];
                  CatA.Descendant[i].SnapNum[0] = CatA.Descendant[i].SnapNum[1];
                  count_c++;
                  count_s++;
                }
            }
#endif
        }
      /* no primary at all: promote the secondary */
      if(index_b < 0 && index_c >= 0)
        {
          CatA.Descendant[i].HaloIndex[0] = CatA.Descendant[i].HaloIndex[1];
          CatA.Descendant[i].SnapNum[0] = CatA.Descendant[i].SnapNum[1];
          CatC.CountProgenitors[index_c]++;
          count_w++;
        }
      /* no descendant found at all: accumulate size statistics */
      if(index_b < 0 && index_c < 0)
        {
          sumpart += CatA.SubLen[i];
          count_n++;
        }
    }
  printf("Out of %d primary descendants, %d have been rerouted to the secondary descendant.\n",
         count_b, count_c);
  printf("Additionally, %d have been pointed to the secondary because they had no primary.\n", count_w);
#ifdef SKIP_BY_WEIGHT
  printf("Additionally, %d have been pointed to the secondary because the primary have had low weights.\n",
         count_s);
#endif
  printf("This leaves %d without descendant, of average size = %g particles.\n", count_n, sumpart / count_n);
  fflush(stdout);
}
/* For every subhalo in catB, counts how many subhalos of catA have it as
 * their primary descendant; the result is stored in catB->CountProgenitors.
 * BUG FIX: several progenitors routinely share one descendant (that is the
 * very situation this counter detects), so the increment in the second
 * parallel loop raced under OpenMP; it is now performed atomically. */
void count_progenitors(struct halo_catalogue *catA, struct halo_catalogue *catB)
{
  int i;

#if defined(_OPENMP)
#pragma omp parallel for
#endif
  for(i = 0; i < catB->TotNsubhalos; i++)
    catB->CountProgenitors[i] = 0;

#if defined(_OPENMP)
#pragma omp parallel for
#endif
  for(i = 0; i < catA->TotNsubhalos; i++)
    {
      if(catA->Descendant[i].HaloIndex[0] >= 0)
        {
#if defined(_OPENMP)
#pragma omp atomic
#endif
          catB->CountProgenitors[catA->Descendant[i].HaloIndex[0]]++;
        }
    }
}
/* One candidate "vote" cast by a single particle of a progenitor subhalo:
 * the descendant halo it landed in, and the weight of the vote. */
struct cand_data
{
  int haloindex;   /* index of the candidate descendant subhalo in catB */
  float weight;    /* binding-rank weight 1/(j+1)^ALPHA of the voting particle */
};
/* For every subhalo in catA, finds its descendant in catB by letting each of
 * its particles vote for the catB halo it belongs to, weighted by binding
 * rank (1/(j+1)^ALPHA, so the most-bound particles dominate). The winning
 * halo is stored in catA->Descendant[i].HaloIndex[entry] (entry 0 = primary,
 * 1 = secondary), or -1 if no particle landed in any halo.
 * snapnum is recorded alongside. Each OpenMP thread uses its own scratch
 * buffers sized for the largest subhalo. */
void determine_descendants(struct halo_catalogue *catA, struct halo_catalogue *catB, int entry, int snapnum)
{
  int i, j, ndiff, ncand, haloB, prev, maxlen;
  MyIDType id;
  float weightmax;
  int halomax;
  struct cand_data *candlist, *difflist;

  /* size the per-thread scratch buffers for the largest subhalo */
  maxlen = 0;
  for(i = 0; i < catA->TotNsubhalos; i++)
    if(catA->SubLen[i] > maxlen)
      maxlen = catA->SubLen[i];

#if defined(_OPENMP)
#pragma omp parallel private(candlist, difflist, ncand, i, j, id, haloB, ndiff, prev, weightmax, halomax)
#endif
  {
    candlist = mymalloc(maxlen * sizeof(struct cand_data));
    difflist = mymalloc(maxlen * sizeof(struct cand_data));
#if defined(_OPENMP)
#pragma omp for schedule(dynamic) nowait
#endif
    for(i = 0; i < catA->TotNsubhalos; i++) // for each subhalo in Snapshot A ...
      {
        ncand = 0;
        for(j = 0; j < catA->SubLen[i]; j++) // ... and for each particle in each subhalo
          {
            id = catA->IdList[catA->SubOffset[i] + j]; // ... identify the particle's ID
            /* NOTE(review): if MyIDType is unsigned, `id >= 0` is always true
               and only the upper bound is effective — confirm the typedef. */
            if(id >= 0 && id < TotNumPart) // ... (and as long as it's in the accetable range)
              {
                haloB = catB->IdToHalo[id]; // ... identify the halo that contains this particle in snapshot B
                if(haloB >= 0) // all particles are in haloes (they have -1), but if it is in a halo...
                  {
                    candlist[ncand].haloindex = haloB; // ... set the haloindex accordingly
                    candlist[ncand].weight = 1.0 / pow(j + 1, ALPHA); // ... and set the weighting based on how bound it was
                    ncand++;
                  }
              }
            else
              {
                char buf[100];
                long_to_str(buf, id);
                printf("bummer! i=%d id=%s TotumPart=%d\n", i, buf, (int)TotNumPart);
                exit(4);
              }
          }
        /* group the votes by candidate halo and accumulate their weights */
        qsort(candlist, ncand, sizeof(struct cand_data), sort_candlist);
        for(j = 0, ndiff = 0, prev = -1; j < ncand; j++)
          {
            if(candlist[j].haloindex != prev)
              {
                ndiff++;
                difflist[ndiff - 1].haloindex = candlist[j].haloindex;
                difflist[ndiff - 1].weight = 0;
              }
            difflist[ndiff - 1].weight += candlist[j].weight;
            prev = candlist[j].haloindex;
          }
        /* pick the candidate with the largest accumulated weight */
        weightmax = 0;
        halomax = -1;
        for(j = 0; j < ndiff; j++)
          {
            if(difflist[j].weight > weightmax)
              {
                weightmax = difflist[j].weight;
                halomax = difflist[j].haloindex;
              }
          }
        if(ndiff > 0 && halomax >= 0)
          {
            catA->Descendant[i].HaloIndex[entry] = halomax;
            catA->Descendant[i].SnapNum[entry] = snapnum;
#ifdef SKIP_BY_WEIGHT
            catA->Descendant[i].Weight[entry] = weightmax;
#endif
          }
        else
          {
            /* no particle of this subhalo ended up in any halo of catB */
            catA->Descendant[i].HaloIndex[entry] = -1;
            catA->Descendant[i].SnapNum[entry] = -1;
#ifdef SKIP_BY_WEIGHT
            catA->Descendant[i].Weight[entry] = -1;
#endif
          }
      }
    myfree(candlist);
    myfree(difflist);
  }
}
int sort_twoids_id(const void *a, const void *b)
{
if(((struct twoids *) a)->id < ((struct twoids *) b)->id)
return -1;
if(((struct twoids *) a)->id > ((struct twoids *) b)->id)
return +1;
return 0;
}
int sort_twoids_ord(const void *a, const void *b)
{
if(((struct twoids *) a)->ord < ((struct twoids *) b)->ord)
return -1;
if(((struct twoids *) a)->ord > ((struct twoids *) b)->ord)
return +1;
return 0;
}
int sort_candlist(const void *a, const void *b)
{
if(((struct cand_data *) a)->haloindex < ((struct cand_data *) b)->haloindex)
return -1;
if(((struct cand_data *) a)->haloindex > ((struct cand_data *) b)->haloindex)
return +1;
return 0;
}
int sort_IDType(const void *a, const void *b)
{
if(*((MyIDType *) a) < *((MyIDType *) b))
return -1;
if(*((MyIDType *) a) > *((MyIDType *) b))
return +1;
return 0;
}
/* Build the reverse map cat->IdToHalo: for every particle ID in
[0, TotNumPart), the index of the subhalo of `cat` that contains it, or -1
if it belongs to no subhalo.  Allocates cat->IdToHalo (TotNumPart ints).
NOTE(review): if a particle ID appeared in more than one subhalo the last
writer would win, and under OpenMP that write would be a race -- presumably
subhalo membership is exclusive; confirm with the subfind output format. */
void prepare_index_list(struct halo_catalogue *cat)
{
MyIDType id;
signed long long ii;
int i, j;
cat->IdToHalo = mymalloc(sizeof(int) * TotNumPart);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for(ii = 0; ii < TotNumPart; ii++) // start by assigning all particles to no halo
cat->IdToHalo[ii] = -1;
#if defined(_OPENMP)
#pragma omp parallel for private(j,id)
#endif
for(i = 0; i < cat->TotNsubhalos; i++) // loop over all subhalos
for(j = 0; j < cat->SubLen[i]; j++) // loop over all particles in each subhalo
{
id = cat->IdList[cat->SubOffset[i] + j]; // id from the subhalo list
if(id >= 0 && id < TotNumPart)
cat->IdToHalo[id] = i;
else
{
/* an out-of-range ID means the ID remapping is broken: abort */
char buf[100];
long_to_str(buf, id);
printf("bummer! i=%d j=%d id=%s id=%d TotNumPart=%d)\n", i, j, buf, (int)id, (int)TotNumPart);
exit(1);
}
}
}
/* Read the subfind header of snapshot `num` (file 0) to learn the catalogue
sizes, then allocate the group-level arrays of `cat` (lengths, offsets,
per-type variants and the Group array itself) and zero each group's subhalo
counter.  Assumes read_basic_subfind_header_hdf5 fills cat->TotNgroups --
TODO confirm.
NOTE(review): cat->SubhaloLenType is allocated here AND re-allocated in
allocate_subhalo_catalogue; if both are called on the same catalogue the
pointer array allocated here leaks -- verify intended call sequence. */
void allocate_group_catalogue(int num, struct halo_catalogue *cat)
{
int nids, nFiles, nsubhalos, ngroups;
char buf[1000];
get_filename(buf, num, 0, 1);
read_basic_subfind_header_hdf5(buf, 0, cat, &nFiles , &nids , &nsubhalos, &ngroups);
int jjj;
cat->GroupNsubs = mymalloc(sizeof(MyIDType) * cat->TotNgroups);
cat->GroupLen = mymalloc(sizeof(MyIDType) * cat->TotNgroups);
cat->GroupOffset = mymalloc(sizeof(MyIDType) * cat->TotNgroups);
cat->GroupLenType = mymalloc(6 * sizeof(MyIDType *));        /* one array per particle type */
cat->GroupOffsetType = mymalloc(6 * sizeof(MyIDType *));
cat->SubhaloLenType = mymalloc(6 * sizeof(MyIDType *));
cat->Group = mymalloc(sizeof(struct group_data) * cat->TotNgroups);
for(jjj=0 ; jjj< 6; jjj++)
{
cat->GroupLenType[jjj] = mymalloc(cat->TotNgroups * sizeof(MyIDType));
cat->GroupOffsetType[jjj] = mymalloc(cat->TotNgroups * sizeof(MyIDType));
}
for(jjj=0; jjj< cat->TotNgroups; jjj++)
cat->Group[jjj].count = 0;   /* no subhaloes attached to any group yet */
}
/* Load the group-level data of snapshot `num` into `cat`, iterating over all
subfind files of the snapshot.  nFiles starts at 1 and is updated by each
file's header, so the loop bound grows to the real file count after the
first read.  Resets every group's subhalo counter afterwards. */
void load_group_catalogue(int num, struct halo_catalogue *cat)
{
int i=0, nids, nFiles, nsubhalos, ngroups, groupcount;
char buf[1000];
get_filename(buf, num, i, 1);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
groupcount = 0;   /* running offset of groups already loaded */
printf("starting the group loading loop\n"); fflush(stdout);
for(i = 0, nFiles = 1; i < nFiles; i++)
{
get_filename(buf, num, i, 1);
if(i == 1)
printf(" ... to ... \n");
if(i == 0 || i == nFiles-1)
printf("Loading : %s\n",buf);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
if(ngroups > 0)
read_subfind_group_hdf5(buf, i, cat, ngroups, groupcount);
groupcount += ngroups;
}
for(i=0 ; i < cat->TotNgroups ; i++)
cat->Group[i].count = 0;   /* counters get rebuilt in load_subhalo_catalogue */
printf("finished the group loading loop\n"); fflush(stdout);
}
/* Allocate the subhalo-level arrays of `cat` (lengths, offsets, descendant
records, per-type lengths) plus a Subhalo array inside each group that has
at least one subhalo.  Sizes come from the subfind header of file 0 of
snapshot `num`; assumes allocate_group_catalogue ran first so that
cat->GroupNsubs is populated -- TODO confirm. */
void allocate_subhalo_catalogue(int num, struct halo_catalogue *cat)
{
int nids, nFiles, nsubhalos, ngroups;
char buf[1000];
get_filename(buf, num, 0, 1);
read_basic_subfind_header_hdf5(buf, 0, cat, &nFiles , &nids , &nsubhalos, &ngroups);
int iii,jjj;
cat->SubLen = mymalloc(sizeof(int) * cat->TotNsubhalos);
cat->SubParentHalo = mymalloc(sizeof(int) * cat->TotNsubhalos);
cat->CountProgenitors = mymalloc(sizeof(int) * cat->TotNsubhalos);
cat->SubOffset = mymalloc(sizeof(MyIDType) * cat->TotNsubhalos);
cat->Descendant = mymalloc(sizeof(struct descendant_data) * cat->TotNsubhalos);
cat->SubhaloGrNr = mymalloc(sizeof(MyIDType) * cat->TotNsubhalos);
cat->SubhaloLen = mymalloc(sizeof(MyIDType) * cat->TotNsubhalos);
cat->SubhaloLenType = mymalloc(6 * sizeof(MyIDType *));   /* one array per particle type */
for(jjj=0 ; jjj< 6; jjj++)
cat->SubhaloLenType[jjj] = mymalloc(cat->TotNsubhalos * sizeof(MyIDType));
for(iii=0 ; iii < cat->TotNgroups ; iii++)
if(cat->GroupNsubs[iii] > 0)
cat->Group[iii].Subhalo = mymalloc(sizeof(struct subhalo_data) * cat->GroupNsubs[iii]);
}
/* Load the subhalo catalogue of snapshot `num` into `cat`, then rebuild a
dark-matter-only particle ID list (cat->IdList), ordered group by group and
subhalo by subhalo, together with per-subhalo offsets and lengths
(cat->SubOffset / cat->SubLen).  Only particle type 1 (DM) contributes.
Assumes the allocate_* routines and load_group_catalogue ran first --
TODO confirm the required call order. */
void load_subhalo_catalogue(int num, struct halo_catalogue *cat)
{
int i=0, nids, nFiles, nsubhalos, subcount, ngroups;
char buf[1000];
MyIDType * local_id_array, ndm = 0 , Nskip = 0;
get_filename(buf, num, i, 1);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
subcount = 0;
printf("starting the subhalo loading loop\n"); fflush(stdout);
/* nFiles starts at 1 and is corrected by each file's header as we read */
for(i = 0, nFiles = 1; i < nFiles; i++)
{
get_filename(buf, num, i, 1);
if(i == 1)
printf(" ... to ... \n");
if(i == 0 || i == nFiles-1)
printf("Loading : %s\n",buf);
read_basic_subfind_header_hdf5(buf, i, cat, &nFiles , &nids , &nsubhalos, &ngroups);
if(nsubhalos > 0)
read_subfind_subhalo_hdf5(buf, i, cat, nsubhalos, subcount);
subcount += nsubhalos;
}
long_to_str(buf, cat->TotNids);
printf("finished the subhalo loading loop\n"); fflush(stdout);
int iii,jjj,j;
long int subfind_dm_ids=0;
/* mirror the flat per-subhalo length arrays into each group's Subhalo
structures; Group[].count ends up as the number of subhaloes per group */
for(i=0 ; i < cat->TotNsubhalos ; i++)
{
cat->Group[cat->SubhaloGrNr[i]].Subhalo[cat->Group[cat->SubhaloGrNr[i]].count].SubhaloLen = cat->SubhaloLen[i];
for(j=0 ; j < 6 ; j++)
cat->Group[cat->SubhaloGrNr[i]].Subhalo[cat->Group[cat->SubhaloGrNr[i]].count].SubhaloLenType[j] = cat->SubhaloLenType[j][i];
cat->Group[cat->SubhaloGrNr[i]].count = cat->Group[cat->SubhaloGrNr[i]].count + 1;
}
/* total number of DM particles bound in subhaloes = size of IdList */
for(iii = 0; iii < cat->TotNgroups; iii++) // for each group
for(jjj = 0; jjj < cat->Group[iii].count ; jjj++) // and each subhalo within the group
subfind_dm_ids += cat->Group[iii].Subhalo[jjj].SubhaloLenType[1];
cat->TotNids = subfind_dm_ids;
cat->IdList = mymalloc( subfind_dm_ids * sizeof(MyIDType));
/* read the raw DM particle IDs of the snapshot into one flat array */
i = 0;
get_filename(buf, num, i, 0);
read_snap_header_attributes_in_hdf5(buf);
ndm = header.npartTotal[1]+ ((long long) header.npartTotalHighWord[1] << 32);
local_id_array = mymalloc(ndm * sizeof(MyIDType));
for(i = 0; i < nFiles; i++)
{
get_filename(buf, num, i, 0);
if(i == 0 || i == nFiles-1)
printf(" and : %s\n",buf);
read_snap_header_attributes_in_hdf5(buf);
read_particle_ids_in_hdf5(buf, 1, local_id_array, Nskip); // loads all dm particle ids
Nskip += header.npart[1];
}
int k;
MyIDType local_idcount=0, local_galaxycount=0;
i = j = k = 0;
printf("starting the assignment loop\n"); fflush(stdout);
/* walk groups -> subhaloes -> DM particles, copying IDs into cat->IdList
and recording each subhalo's offset/length in the flat list */
MyIDType cumulative_subhalo_offset = 0, local_offset = 0;
for(i = 0; i < cat->TotNgroups; i++) // for each group
{
cat->GroupOffset[i] = cumulative_subhalo_offset;
local_offset = 0;
for(j = 0; j < cat->Group[i].count ; j++) // and each subhalo within the group
{
for(k = 0; k < cat->Group[i].Subhalo[j].SubhaloLenType[1] ; k++) // and each DM particle within the subhalo
{
cat->IdList[local_idcount] = local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]; // can't trust this group offset
local_idcount++;
#ifdef VERBOSE
#ifdef LONGIDS
if (i < 2 && j < 2 && k < 2)
{
printf("cat->GroupOffsetType[1][i] = %lu, local_offset = %d, k = %d, local_id_array[%lu] = %llu\n",
cat->GroupOffsetType[1][i], local_offset, k, cat->GroupOffsetType[1][i] + local_offset + k , local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]);
printf("Group %d, Subhalo %d, Particle %d, ID = %llu\n",i,j,k,local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]);
}
#else
if (i < 10 && j < 10 && k < 10)
printf("Group %d, Subhalo %d, Particle %d, ID = %d\n",i,j,k,local_id_array[cat->GroupOffsetType[1][i] + local_offset + k ]);
#endif
#endif
}
cat->SubOffset[local_galaxycount] = cumulative_subhalo_offset;
cat->SubLen[local_galaxycount] = cat->Group[i].Subhalo[j].SubhaloLenType[1];
cumulative_subhalo_offset += cat->Group[i].Subhalo[j].SubhaloLenType[1];
local_offset += cat->Group[i].Subhalo[j].SubhaloLenType[1];
local_galaxycount++;
}
#ifdef VERBOSE
if(i < 10)
{
printf("First ID of Group %d can be indexed as:\n",i);
printf(" local_id_array[cat->GroupOffsetType[1][%d]] = %llu where cat->GroupOffsetType[1][%d] = %llu\n",
i,local_id_array[cat->GroupOffsetType[1][i]],i,cat->GroupOffsetType[1][i]);
printf(" cat->IdList[cat->GroupOffset[%d]] = %llu where cat->GroupOffset[%d] = %llu\n\n",
i,cat->IdList[cat->GroupOffset[i]], i, cat->GroupOffset[i]);
}
#endif
}
printf("finishing the assignment loop\n"); fflush(stdout);
myfree(local_id_array);
}
/* Write the primary descendant links of the global catalogue CatA to
"<TreeOutputDir>/treedata/sub_desc_sf<SnapSkipFac>_<SnapshotNum>": the
subhalo count, then the HaloIndex[0] array, then the SnapNum[0] array,
all as raw ints.  Exits on failure to open the file. */
void save_decendant_list(void)
{
int i, *data;
char buf[1000];
FILE *fd;
sprintf(buf, "%s/treedata", TreeOutputDir);
mkdir(buf, 0755);   /* ensure the output directory exists (ignore EEXIST) */
sprintf(buf, "%s/treedata/sub_desc_sf%d_%03d", TreeOutputDir, SnapSkipFac, SnapshotNum);
if(!(fd = fopen(buf, "w")))
{
printf("can't open file `%s'\n", buf);
exit(1);
}
fwrite(&CatA.TotNsubhalos, sizeof(int), 1, fd);
data = mymalloc(sizeof(int) * CatA.TotNsubhalos);
/* pack the descendant indices into a contiguous int buffer for one fwrite */
for(i = 0; i < CatA.TotNsubhalos; i++)
data[i] = CatA.Descendant[i].HaloIndex[0];
fwrite(data, sizeof(int), CatA.TotNsubhalos, fd);
for(i = 0; i < CatA.TotNsubhalos; i++)
data[i] = CatA.Descendant[i].SnapNum[0];
fwrite(data, sizeof(int), CatA.TotNsubhalos, fd);
fclose(fd);
myfree(data);
}
/* Read (and discard into blksize1/blksize2) the Fortran-style record-size
 * markers that bracket each block in old binary snapshot files.  Assumes
 * int variables blksize1/blksize2 and FILE *fd exist in the expansion
 * scope -- TODO confirm against callers. */
#define SKIP {my_fread(&blksize1,sizeof(int),1,fd);}
#define SKIP2 {my_fread(&blksize2,sizeof(int),1,fd);}
/* Set the global TotNumPart from the header of the last snapshot (file 0):
the total dark-matter (type 1) particle count, combining the 32-bit total
with its high word.  Other particle types are deliberately excluded (see
commented-out line). */
void get_TotNumPart(void)
{
char buf[1000], bufA[100];
printf("Starting to read...\n");
get_filename(buf, LastSnapShotNr, 0, 0);
read_snap_header_attributes_in_hdf5(buf);
TotNumPart =
// header.npartTotal[0] + (((long long) header.npartTotalHighWord[0]) << (long long) 32) +
header.npartTotal[1] + (((long long) header.npartTotalHighWord[1]) << (long long) 32);
long_to_str(bufA, TotNumPart);
printf("TotNumPart=%s\n", bufA);
}
/* Build IdSnapTable, the sorted table of all dark-matter (type 1) particle
 * IDs in the last snapshot.  reassign_ids() later maps raw particle IDs to
 * their index in this table, giving compact IDs in [0, TotNumPart).
 *
 * If a previously written "sorted_id_table_XXX.hdf5" exists it is read back
 * directly; otherwise the IDs are gathered from every snapshot file, the
 * min/max are reported, the table is sorted and then written out for reuse.
 * Sets the globals TotNumPart and IdSnapTable. */
void get_id_translation_table(void)
{
  FILE *fd;
  char buf[1000], bufA[100], bufB[100];
  int filenr, numfiles;
  MyIDType i, minID, maxID, Nskip = 0;

  printf("reading IDs from last snapshot\n");
  fflush(stdout);

  sprintf(buf, "%s/sorted_id_table_%03d.hdf5", TreeOutputDir, LastSnapShotNr);
  if((fd = fopen(buf, "r")))
    {
      /* A cached, already-sorted table exists: load it and skip the scan. */
      fclose(fd);
      printf("ok, I'm reading '%s'\n", buf);
      fflush(stdout);
      read_num_part_table_hdf5(buf, &TotNumPart);
      IdSnapTable = mymalloc(TotNumPart * sizeof(MyIDType));
      read_id_translation_table_hdf5(buf, TotNumPart, IdSnapTable);
      printf("TotNumPart = %llu \n", TotNumPart);
      fflush(stdout);
      printf("finished reading sorted id table!\n");
      fflush(stdout);
    }
  else
    {
      numfiles = 1;		/* corrected from the header of file 0 below */
      for(filenr = 0; filenr < numfiles; filenr++)
	{
	  if(filenr == 0)
	    printf("Starting to read...\n");
	  get_filename(buf, LastSnapShotNr, filenr, 0);
	  printf(" %s\n", buf);
	  read_snap_header_attributes_in_hdf5(buf);
	  if(filenr == 0)
	    {
	      numfiles = header.num_files;
	      /* only type-1 (DM) particles are counted */
	      TotNumPart =
		//header.npartTotal[0] + (((long long) header.npartTotalHighWord[0]) << (long long) 32) +
		header.npartTotal[1] + (((long long) header.npartTotalHighWord[1]) << (long long) 32);
	      //header.npartTotal[4] + (((long long) header.npartTotalHighWord[4]) << (long long) 32);
	      long_to_str(bufA, TotNumPart);
#ifdef VERBOSE
	      printf("Allocating IdSnapTable...\n");
	      printf(" header.npartTotal[0] = %d\n", header.npartTotal[0]);
	      printf(" header.npartTotal[1] = %d\n", header.npartTotal[1]);
	      printf(" header.npartTotal[4] = %d\n", header.npartTotal[4]);
	      printf(" TotNumPart = %llu\n\n", TotNumPart);
#endif
	      IdSnapTable = mymalloc(TotNumPart * sizeof(MyIDType));
	    }
	  int parttype;
	  parttype = 1;
	  read_particle_ids_in_hdf5(buf, parttype, IdSnapTable, Nskip);
	  Nskip += header.npart[parttype];
#ifdef VERBOSE
	  if(filenr == 0)
	    {
	      printf("\n\n Check that ids are being loaded properly...\n");
	      /* BUGFIX: this printf had a %s conversion but no argument
	       * (undefined behavior); pass the current filename. */
	      printf(" First 10 DM particle ids in %s are:\n", buf);
	      int id_check;
	      /* BUGFIX: the debug index subtracted npart[0], but Nskip was
	       * just advanced by npart[parttype]; use the matching type so
	       * this prints the first IDs of the file just read. */
	      for(id_check = 0; id_check < 10; id_check++)
#ifdef LONGIDS
		printf(" ID[%d] = %llu\n", id_check, IdSnapTable[id_check + Nskip - header.npart[parttype]]);
#else
		printf(" ID[%d] = %d\n", id_check, IdSnapTable[id_check + Nskip - header.npart[parttype]]);
#endif // LONGIDS
	    }
#endif // VERBOSE
	}
      printf("TotNumPart=%s\n", bufA);
      printf("IDs read.\n");
      fflush(stdout);
      /* report the raw ID range before compacting */
      for(i = 1, minID = maxID = IdSnapTable[0]; i < TotNumPart; i++)
	{
	  if(minID > IdSnapTable[i])
	    minID = IdSnapTable[i];
	  if(maxID < IdSnapTable[i])
	    maxID = IdSnapTable[i];
	}
      long_to_str(bufA, minID);
      long_to_str(bufB, maxID);
      printf("min-ID=%s max-ID=%s\n", bufA, bufB);
      printf("sorting IDs\n");
      fflush(stdout);
      /* Nskip equals the total number of IDs loaded (== TotNumPart) */
      qsort(IdSnapTable, Nskip, sizeof(MyIDType), sort_IDType);
      printf("sorting done\n");
      fflush(stdout);
      printf("writing sorted id table...\n");
      fflush(stdout);
      write_id_translation_table_hdf5(IdSnapTable, TotNumPart);
    }
}
/* Rewrite each of the N entries of ids[] as a compact ID in
[0, TotNumPart).  With IDS_HAVE_GAPS the new ID is the entry's index in the
globally sorted IdSnapTable: each thread takes a contiguous chunk, sorts it
by raw ID, walks it in lock-step with IdSnapTable, then restores the
original order.  Without IDS_HAVE_GAPS the raw IDs are assumed to be
1..TotNumPart already and are simply shifted down by one. */
void reassign_ids(MyIDType N, MyIDType * ids)
{
#ifdef IDS_HAVE_GAPS
long long i, j, offset, NN;
#if defined(_OPENMP)
int tid;
int nthreads;
#endif
struct twoids *TwoIDs;
printf("reassign IDs...\n");
fflush(stdout);
#if defined(_OPENMP)
#pragma omp parallel private(tid, nthreads, offset, NN, i, j, TwoIDs) shared(IdSnapTable)
#endif
{
#if defined(_OPENMP)
/* split ids[] into one contiguous chunk per thread; the last thread
takes any remainder */
tid = omp_get_thread_num();
nthreads = omp_get_max_threads();
offset = tid * (N / nthreads);
NN = (N / nthreads);
if(nthreads > 1 && tid == (nthreads - 1))
{
NN = N - offset;
}
#else
NN = N;
offset = 0;
#endif
TwoIDs = mymalloc(NN * sizeof(struct twoids));
for(i = 0; i < NN; i++) // load all ids into the TwoID array
{
TwoIDs[i].id = ids[i + offset]; // the ids at each location
TwoIDs[i].ord = i; // the index at each location
}
qsort(TwoIDs, NN, sizeof(struct twoids), sort_twoids_id); // sort them by id! -> Min id first
/* now assign: both TwoIDs and IdSnapTable are ascending, so a single
forward scan with cursor j matches every entry */
j = 0;
for(i = 0; i < NN; i++)
{
while(IdSnapTable[j] < TwoIDs[i].id && j < (TotNumPart - 1)) // this breaks when IdSnapTable[j] == TwoIDs[i].id
j++;
if(IdSnapTable[j] != TwoIDs[i].id) // if this occurs, should imply that
{ // - j reached TotNumPart without finding a match...
printf("ID mismatch found?\n"); // -> this means there is a particle in the subfind catalog not in the snapshot (IdSnapTable)
printf("IdSnapTable[%llu] = %llu TwoIDs[%llu].id = %llu TotNumPart = %llu \n",j,IdSnapTable[j], i,TwoIDs[i].id, TotNumPart);
exit(1);
}
else
TwoIDs[i].id = j; // THIS IS THE KEY POINT -- THE NEW ID IS THE INDEX IN IdSnapTable!!! min=0; max=N_dm
}
/* sort back */
qsort(TwoIDs, NN, sizeof(struct twoids), sort_twoids_ord); // Sort them by orig order -> old first entry is again first
for(i = 0; i < NN; i++)
ids[i + offset] = TwoIDs[i].id; // repackage them back into the origional array
myfree(TwoIDs);
}
printf("done\n");
fflush(stdout);
#else
/* dense 1-based IDs: compact ID is simply id - 1 */
signed long long i;
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for(i=0; i< N; i++)
ids[i] -= 1;
#endif
}
/* Render the (possibly >32-bit) integer n into s as a decimal string using
 * only 32-bit printf conversions: values of a billion or more are split
 * into a high part and a zero-padded low 9-digit group.
 * NOTE(review): assumes n is non-negative and small enough that the high
 * part fits in an int -- confirm with callers. */
void long_to_str(char *s, long long n)
{
  if(n < 1000000000)
    {
      sprintf(s, "%d", (int) n);
      return;
    }
  sprintf(s, "%d%09d", (int) (n / 1000000000), (int) (n % 1000000000));
}
|
omp_trap1.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
double y(double x); /* Интегрируемая функция */
void Trap(double a, double b, int n, double* global_result_p);
/* Approximate the definite integral of y(x) over [a, b] with the trapezoid
 * rule, splitting the n trapezoids evenly across thread_count OpenMP
 * threads.  (Comments translated from Russian; the Russian runtime strings
 * are left untouched.) */
int main(int argc, char* argv[]) {
double global_result = 0.0; /* Accumulates the final result */
double a, b; /* Left and right endpoints of the interval */
int n; /* Number of trapezoids */
int thread_count;
// Number of threads:
thread_count = 10;
a = 0;
b = 5;
n = (b - a) / 0.00000005;
if (n % thread_count != 0) {
fprintf(stderr, "Количество трапеций должно ровно делиться на количество потоков\n");
exit(0);
}
// The parallel directive creates a parallel region for the structured
// block that follows it, e.g.:
// #pragma omp parallel [clause[ [,] clause]...]
// structured block
// It tells the compiler that the structured block must be executed in
// parallel by several threads. Every thread executes the same instruction
// stream, but not necessarily the same instructions - that depends on the
// statements controlling the program logic, such as if-else.
#pragma omp parallel num_threads(thread_count)
Trap(a, b, n, &global_result);
printf("С n = %d трапеций, приближенное значение\n", n);
printf("определенного интеграла от %f до %f = %.14e\n",
a, b, global_result);
return 0;
} /* main */
/*------------------------------------------------------------------
* Функция: y
* Назначение: Посчитать значение интегрируемой функции
* Входной аргумент: x
* Возвращаемое значение: y(x)
*/
double y(double x) {
double return_val;
return_val = exp(x) * x - cos(x);
return return_val;
} /* y */
/*------------------------------------------------------------------
 * Function: Trap
 * Purpose: Trapezoid rule (each calling thread integrates its own
 *          sub-interval and adds the partial sum to *global_result_p)
 * Input arguments:
 * a: left endpoint of the interval
 * b: right endpoint of the interval
 * n: total number of trapezoids (must divide evenly by the thread count)
 * Output argument:
 * global_result_p: accumulates the approximate value of the definite
 *                  integral of y(x) from a to b
 * (Comments translated from Russian.)
 */
void Trap(double a, double b, int n, double* global_result_p) {
double h, x, my_result;
double local_a, local_b;
int i, local_n;
// This thread's rank within the team:
// a number in the range 0 .. omp_get_num_threads() - 1.
int my_rank = omp_get_thread_num();
// Number of threads in the current team:
// if the caller is not inside a parallel region, this returns 1.
int thread_count = omp_get_num_threads();
h = (b-a)/n;
local_n = n/thread_count;
local_a = a + my_rank*local_n*h;
local_b = local_a + local_n*h;
my_result = (y(local_a) + y(local_b))/2.0;
for (i = 1; i <= local_n-1; i++) {
x = local_a + i*h;
my_result += y(x);
}
my_result = my_result*h;
// Using a critical section for the shared update:
// in OpenMP a critical section is entered with the directive
// #pragma omp critical [name].
// Access to the block is mutually exclusive only with respect to other
// critical sections of the same name (process-wide). If no name is
// given, the directive is bound to an implementation-chosen name, and
// all unnamed critical sections exclude each other.
#pragma omp critical
*global_result_p += my_result;
} /* Trap */
|
test_helper.h | //
// Created by liql2007 on 2020/12/23.
//
#ifndef EFANNA2E_TEST_HELPER_H
#define EFANNA2E_TEST_HELPER_H
#include <iostream>
#include <fstream>
#include <cassert>
#include <cstring>
#include <sys/stat.h>
#include <efanna2e/util.h>
#include <efanna2e/distance.h>
#include <efanna2e/neighbor.h>
// Write the first `size` elements of `vec` to stdout, each followed by a
// single space, then terminate the line.
template<typename T>
void print_vector(const T* vec, unsigned size) {
  unsigned idx = 0;
  while (idx < size) {
    std::cout << vec[idx] << " ";
    ++idx;
  }
  std::cout << std::endl;
}
// Load an *vecs-format file (each record: a 4-byte dimension header
// followed by dim values of type T).  Allocates data with new[] -- the
// caller owns and must delete[] it.  num and dim are outputs.  Exits the
// process if the file cannot be opened.
// NOTE(review): the record-size formula fsize / (dim + 1) / 4 assumes
// sizeof(T) == 4 (float/int vecs) -- confirm before using with other T.
template<typename T>
void load_data(const char* filename, T*& data, unsigned& num,
unsigned& dim) {
std::ifstream in(filename, std::ios::binary);
if (!in.is_open()) {
std::cout << "open file error" << std::endl;
exit(-1);
}
// the first 4 bytes of the first record give the (uniform) dimension
in.read((char*)&dim, 4);
// std::cout<<"data dimension: "<<dim<<std::endl;
in.seekg(0, std::ios::end);
std::ios::pos_type ss = in.tellg();
size_t fsize = (size_t)ss;
num = (unsigned)(fsize / (dim + 1) / 4);   // records = bytes / bytes-per-record
data = new T[(size_t)num * (size_t)dim];
in.seekg(0, std::ios::beg);
for (size_t i = 0; i < num; i++) {
in.seekg(4, std::ios::cur);   // skip each record's dimension header
in.read((char*)(data + i * dim), dim * sizeof(T));
}
in.close();
}
// Write ragged results to an *vecs-format file: each row is stored as a
// 4-byte element count followed by that row's values.  Exits the process
// if any write failed (checked once at the end via badbit).
template<typename T>
void save_data(const char* filename, std::vector<std::vector<T>>& results) {
std::ofstream out(filename, std::ios::binary | std::ios::out);
for (unsigned i = 0; i < results.size(); i++) {
unsigned sz = (unsigned)results[i].size();
out.write((char*)&sz, sizeof(unsigned));
out.write((char*)results[i].data(), (size_t)sz * sizeof(T));
}
if (out.bad()) {
out.close();
std::cerr << "write to file [" << filename << "] failed" << std::endl;
exit(-1);
}
out.close();
}
// Write a flat [num x dim] matrix to an *vecs-format file: each of the num
// rows is stored as a 4-byte dimension followed by dim values of type T.
// Exits the process if any write failed (checked via badbit).
template<typename T>
void save_data(const char* filename, const T* data, unsigned num, unsigned dim) {
std::ofstream out(filename, std::ios::binary | std::ios::out);
for (size_t i = 0; i < num; i++) {
out.write((char*)&dim, sizeof(unsigned));
out.write((char*)(data + i * dim), (size_t)dim * sizeof(T));
}
if (out.bad()) {
out.close();
std::cerr << "write to file [" << filename << "] failed" << std::endl;
exit(-1);
}
out.close();
}
// Ensure dirPath exists and is a directory, creating it (mode 0755) if
// absent.  Exits the process on any failure: the path exists but is not a
// directory, or mkdir fails.
// (Fixed the error messages, which said "dictionary" instead of
// "directory".)
inline void checkAndCreateDir(const char* dirPath) {
  struct stat sb;
  if (stat(dirPath, &sb) == 0) {
    if (!S_ISDIR(sb.st_mode)) {
      std::cerr << dirPath << " is not a directory" << std::endl;
      exit(-1);
    }
  } else if (mkdir(dirPath, 0755) != 0) {
    std::cerr << "create directory [" << dirPath << "] failed" << std::endl;
    exit(-1);
  }
}
// Loads a precomputed ground-truth neighbour file and scores approximate
// nearest-neighbour results against it (average top-K recall).  Also
// provides a helper to brute-force a ground truth for a random query
// subset.
struct GroundTruth {
unsigned truthItemNum;   // neighbours stored per query in the truth file
unsigned queryNum;       // number of queries in the truth file
unsigned* data;          // flat [queryNum x truthItemNum] ids; allocated by load() -- never freed here, TODO confirm ownership
unsigned TOPK;           // how many stored neighbours to score against
GroundTruth(unsigned TOPK) : TOPK(TOPK) {}
// Read the ground-truth ivecs file into data/queryNum/truthItemNum.
void load(const char* filename) {
load_data(filename, data, queryNum, truthItemNum);
std::cout << "ground truth query num: " << queryNum << std::endl;
std::cout << "ground truth item num per query: " << truthItemNum << std::endl;
}
// Print the average fraction of each query's result ids found among the
// first TOPK ground-truth neighbours of that query.
void recallRate(const std::vector<std::vector<unsigned>>& res) {
assert(TOPK <= truthItemNum);
assert(res.size() <= queryNum);
float avgRecallVal = 0;
for (size_t qi = 0; qi < res.size(); ++qi) {
auto truth = data + qi * truthItemNum;
unsigned recallNum = 0;
// linear membership scan; fine for small TOPK
for (auto docId : res[qi]) {
for (unsigned j = 0; j < TOPK; ++j) {
if (truth[j] == docId) {
++recallNum;
break;
}
}
}
auto recallRateVal = (float) recallNum / TOPK;
// recallRate.push_back(recallRateVal);
avgRecallVal += recallRateVal;
}
auto recall = avgRecallVal / res.size();
std::cout << "recall(top" << TOPK << ") : " << recall << std::endl;
}
// Pick queryNum random points out of vecData, brute-force their topK
// nearest neighbours by L2 distance (excluding the query point itself),
// and write the query vectors and neighbour ids to the given paths.
static void createPartGroundTruth(const char* queryPath, const char* groundTruthPath,
const float* vecData, unsigned pointNum, unsigned dim,
unsigned queryNum, unsigned topK) {
efanna2e::DistanceL2 distance;
std::mt19937 rng(time(nullptr));
std::vector<unsigned> queryIds(queryNum);
efanna2e::GenRandom(rng, queryIds.data(), queryNum, pointNum);
std::vector<std::vector<unsigned>> topNeighbors(queryNum);
std::vector<float> qVecs((size_t)queryNum * dim);
#pragma omp parallel for
for (size_t i = 0; i < queryNum; ++i) {
auto qId = queryIds[i];
// seed the bounded pool with the query itself; real neighbours displace it
efanna2e::Neighbor nn(qId, 0, true);
std::vector<efanna2e::Neighbor> neighborPool;
neighborPool.reserve(topK + 1);
neighborPool.resize(topK);
neighborPool[0] = std::move(nn);
unsigned poolSize = 1;
auto q = vecData + (size_t)qId * dim;
std::memcpy(qVecs.data() + i * dim, q, dim * sizeof(float));
for (size_t vId = 0; vId < pointNum; ++vId) {
if (vId == qId) {
continue;
}
auto v = vecData + vId * dim;
float dist = distance.compare(v, q, dim);
efanna2e::Neighbor nn(vId, dist, true);
efanna2e::InsertIntoPool(neighborPool.data(), poolSize, nn);
if (poolSize < topK) {
++poolSize;
}
}
assert(poolSize == topK);
std::sort(neighborPool.begin(), neighborPool.end(),
[](const efanna2e::Neighbor& l, const efanna2e::Neighbor& r) {
return l.distance < r.distance; });
auto& queryTopNeighbor = topNeighbors[i];
queryTopNeighbor.reserve(topK);
for (const auto& nn : neighborPool) {
queryTopNeighbor.push_back(nn.id);
}
}
save_data(groundTruthPath, topNeighbors);
save_data(queryPath, qVecs.data(), queryNum, dim);
}
};
// File paths and size of one partition of the dataset (see Partitions::init
// for the naming scheme).
struct PartInfo {
size_t vecNum;                 // number of vectors in this partition
std::string docPath;           // partition vectors (.fvecs)
std::string idPath;            // global ids of the partition's vectors
std::string nsgPath;           // NSG graph file
std::string knnPath;           // kNN graph file
std::string queryPath;         // per-partition query vectors
std::string groundTruthPath;   // per-partition ground truth (.ivecs)
};
// Metadata for a partitioned dataset living in one directory: per-partition
// file paths plus totals, with (de)serialization to a small "meta.txt".
// (Fixed the deserialize() error message, which said "dictionary" instead
// of "directory".)
struct Partitions {
  std::vector<PartInfo> partInfos;  // one entry per partition
  size_t totalVecNum = 0;           // vectors across all partitions
  std::string dirPath;              // normalized with a trailing '/'
  size_t dim;                       // vector dimensionality

  std::string getMetaPath() { return dirPath + "meta.txt"; }
  std::string getMergedNsgPath() { return dirPath + "merged.nsg"; }
  std::string getMergedVecPath() { return dirPath + "merged.fvecs"; }

  // Write partition count, dimension and per-partition sizes to meta.txt.
  void serialize() {
    auto metaPath = getMetaPath();
    std::cout << "serialize partition meta to " << metaPath << std::endl;
    std::ofstream out(metaPath.c_str());
    out << "partition num: " << partInfos.size() << std::endl;
    out << "dimension: " << dim << std::endl;
    out << "total doc num: " << totalVecNum << std::endl;
    for (unsigned i = 0; i < partInfos.size(); ++i) {
      out << "partition_" << i + 1 << " doc num: " <<
        partInfos[i].vecNum << std::endl;
    }
    out.close();
  }

  // Read meta.txt from dirPath (must be an existing directory) and rebuild
  // partInfos via init().  Exits the process on a missing directory or
  // unreadable meta file.  Parsing relies on the exact "label: value"
  // layout written by serialize().
  void deserialize(const char* dirPath) {
    struct stat sb;
    if (stat(dirPath, &sb) != 0 || !S_ISDIR(sb.st_mode)) {
      std::cerr << dirPath << " is not a directory" << std::endl;
      exit(-1);
    }
    std::string metaPath;
    if (dirPath[std::strlen(dirPath) - 1] != '/') {
      metaPath = dirPath + std::string("/meta.txt");
    } else {
      metaPath = dirPath + std::string("meta.txt");
    }
    std::ifstream in(metaPath.c_str());
    if (!in.is_open()) {
      std::cout << "open file " << metaPath << " failed" << std::endl;
      exit(-1);
    }
    std::string desc;
    std::getline(in, desc, ':');   // consume the "partition num" label
    unsigned partNum;
    in >> partNum;
    std::cout << "partition num: " << partNum << std::endl;
    init(dirPath, partNum);
    std::getline(in, desc, ':');   // "dimension" label
    in >> dim;
    std::cout << "dim: " << dim << std::endl;
    std::getline(in, desc, ':');   // "total doc num" label
    in >> totalVecNum;
    std::cout << "vector num: " << totalVecNum << std::endl;
    for (auto& part : partInfos) {
      std::getline(in, desc, ':'); // per-partition "doc num" label
      in >> part.vecNum;
      std::cout << "partition vector num: " << part.vecNum << std::endl;
    }
  }

  // Populate dirPath (with trailing '/') and the per-partition file names
  // docs_i.fvecs / ids_i.data / nng_i.nsg / nng_i.knn / query_i.fvecs /
  // groundtruth_i.ivecs for i = 1..partNum.  vecNum starts at 0.
  void init(const char* dirName, unsigned partNum) {
    dirPath = dirName;
    if (dirPath[dirPath.length() - 1] != '/') {
      dirPath.append("/");
    }
    partInfos.clear();
    partInfos.reserve(partNum);
    for (unsigned i = 0; i < partNum; ++i) {
      auto docPath = dirPath + "docs_" + std::to_string(i + 1) + ".fvecs";
      auto idPath = dirPath + "ids_" + std::to_string(i + 1) + ".data";
      auto nsgPath = dirPath + "nng_" + std::to_string(i + 1) + ".nsg";
      auto knnPath = dirPath + "nng_" + std::to_string(i + 1) + ".knn";
      auto queryPath = dirPath + "query_" + std::to_string(i + 1) + ".fvecs";
      auto groundTruthPath = dirPath + "groundtruth_" + std::to_string(i + 1) + ".ivecs";
      PartInfo part{0, docPath, idPath, nsgPath, knnPath, queryPath, groundTruthPath};
      partInfos.emplace_back(std::move(part));
    }
  }

  // Create the directory if needed and return an initialized Partitions.
  static Partitions create(const char* dirPath, unsigned partNum) {
    checkAndCreateDir(dirPath);
    Partitions ret;
    ret.init(dirPath, partNum);
    return ret;
  }
};
#endif //EFANNA2E_TEST_HELPER_H
|
task-dependency.c | /*
* task-dependency.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s
// REQUIRES: tsan
#include "ompt/ompt-signal.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
/* Archer/TSan testcase driver: three tasks touch `var`; the third task
 * deliberately omits its depend(in: var) clause, so its var++ races with
 * the first task's var++.  The race is the point of the test (see the
 * CHECK lines below) -- do not "fix" it.  OMPT_SIGNAL/OMPT_WAIT sequence
 * the tasks enough to make the race observable. */
int main(int argc, char *argv[]) {
int var = 0, a = 0;
#pragma omp parallel num_threads(8) shared(var, a)
#pragma omp master
{
#pragma omp task shared(var, a) depend(out : var)
{
OMPT_SIGNAL(a);
var++;
}
#pragma omp task shared(a) depend(in : var)
{
OMPT_SIGNAL(a);
OMPT_WAIT(a, 3);
}
#pragma omp task shared(var) // depend(in: var) is missing here!
{
var++;
OMPT_SIGNAL(a);
}
// Give other thread time to steal the task.
OMPT_WAIT(a, 2);
}
/* both increments should have landed despite the race */
int error = (var != 2);
fprintf(stderr, "DONE\n");
return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-dependency.c:41
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-dependency.c:30
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
time_omp_task_spawn.c | #ifdef HAVE_CONFIG_H
# include "config.h" /* for _GNU_SOURCE */
#endif
#include <assert.h>
#include <stdio.h>
#include <omp.h>
#include <qthread/qthread.h>
#include <qthread/qtimer.h>
#include "argparsing.h"
/* Trivial qthread task body used to measure pure spawn overhead: ignores
 * its argument and yields 0. */
static aligned_t null_task(void *args_)
{
    (void) args_;               /* unused */
    return 0;
}
/* Benchmark: time the spawning of `count` no-op OpenMP tasks.  With
 * MT_PAR_FORK set, the tasks are spawned from a nested parallel-for
 * (parallel fork); otherwise all are spawned serially from a single
 * wrapper task.  Prints "<threads> <count> <seconds>". */
int main(int argc,
char *argv[])
{
uint64_t count = 1048576;   /* tasks to spawn; override via MT_COUNT */
int par_fork = 0;           /* spawn in parallel? override via MT_PAR_FORK */
unsigned long threads = 1;
qtimer_t timer;
double total_time = 0.0;
CHECK_VERBOSE();
NUMARG(count, "MT_COUNT");
NUMARG(par_fork, "MT_PAR_FORK");
assert(0 != count);
#pragma omp parallel
#pragma omp single
{
timer = qtimer_create();
threads = omp_get_num_threads();
if (par_fork) {
/* many producers: each parallel-for iteration spawns one task */
qtimer_start(timer);
#pragma omp parallel for
for (uint64_t i = 0; i < count; i++) {
#pragma omp task untied
null_task(NULL);
}
} else {
/* single producer: one wrapper task spawns all the child tasks */
qtimer_start(timer);
#pragma omp task untied
for (uint64_t i = 0; i < count; i++) {
#pragma omp task untied
null_task(NULL);
}
}
#pragma omp taskwait
qtimer_stop(timer);   /* stop only after every spawned task completed */
}
total_time = qtimer_secs(timer);
qtimer_destroy(timer);
printf("%lu %lu %f\n",
threads,
(unsigned long)count,
total_time);
return 0;
}
/* vim:set expandtab */
|
DRB109-orderedmissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
* Missing the ordered clause
* Data race pair: x@56:5 vs. x@56:5
* */
// DataRaceBench kernel: intentionally racy increment of a shared counter.
// The loop directive carries `ordered` but no `#pragma omp ordered` region
// wraps the increment, so `x++` is an unsynchronized read-modify-write —
// this is the documented data race pair. Do NOT "fix" it: the race is the
// benchmark's purpose.
int main()
{
  int x =0;
  #pragma omp parallel for ordered
  for (int i = 0; i < 100; ++i) {
    x++;   // data race: concurrent unsynchronized updates of shared x
  }
  printf ("x=%d\n",x);
  return 0;
}
|
GB_binop__bxor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxor_uint32
// A.*B function (eWiseMult): GB_AemultB__bxor_uint32
// A*D function (colscale): GB_AxD__bxor_uint32
// D*A function (rowscale): GB_DxB__bxor_uint32
// C+=B function (dense accum): GB_Cdense_accumB__bxor_uint32
// C+=b function (dense accum): GB_Cdense_accumb__bxor_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxor_uint32
// C=scalar+B GB_bind1st__bxor_uint32
// C=scalar+B' GB_bind1st_tran__bxor_uint32
// C=A+scalar GB_bind2nd__bxor_uint32
// C=A'+scalar GB_bind2nd_tran__bxor_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) ^ (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BXOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = aij ^ bij for bxor_uint32.
// The loop body lives in the included template; this wrapper only binds the
// type/operator macros defined above in this generated file.
GrB_Info GB_Cdense_ewise3_noaccum__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// bxor_uint32 operator. Work is pre-sliced into `ntasks` tasks via the
// kfirst/klast/pstart slice arrays; the loop is in the included template.
GrB_Info GB_Cdense_accumB__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with bxor_uint32.
// `p_bwork` points at the scalar, already typecast to uint32_t.
GrB_Info GB_Cdense_accumb__bxor_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return above always fires first.
    // Harmless generated-code artifact.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (colscale),
// combining entries with bxor_uint32. Work is pre-sliced across tasks.
GrB_Info GB_AxD__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (rowscale),
// combining entries with bxor_uint32.
GrB_Info GB_DxB__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B,
// applying bxor_uint32 where both are present. Tasks/slices are prebuilt by
// the caller; the merge logic is in the included template.
GrB_Info GB_AaddB__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B, applying bxor_uint32 to each matched pair of entries.
GrB_Info GB_AemultB__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = x ^ Bx [k] for all k: apply the binary operator with the scalar
// bound as the first operand. Cx and Bx may alias (pure elementwise map).
GrB_Info GB_bind1st__bxor_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    const uint32_t *Bx = (const uint32_t *) Bx_input ;
    const uint32_t xval = (*((const uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = xval ^ Bx [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = Ax [k] ^ y for all k: apply the binary operator with the scalar
// bound as the second operand. Cx and Ax may alias (pure elementwise map).
GrB_Info GB_bind2nd__bxor_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    const uint32_t *Ax = (const uint32_t *) Ax_input ;
    const uint32_t yval = (*((const uint32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = Ax [k] ^ yval ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x) ^ (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x ^ aij via the GB_CAST_OP
// macro defined just above. The transpose loop is in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__bxor_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij) ^ (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij ^ y via the GB_CAST_OP
// macro defined just above. The transpose loop is in GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__bxor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;      // operator compiled out via GxB_NO_* flags
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_ex_32.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define N 1024
// Fill array[0..size-1] with pseudo-random values in [10, 49].
void initArray(unsigned int *array, unsigned int size)
{
    unsigned int idx = 0;
    while (idx < size) {
        array[idx] = rand() % 40 + 10;
        ++idx;
    }
}
// Print the elements of array[0..size-1] separated by spaces, then newline.
void printArray(unsigned int *array, unsigned int size)
{
    for (unsigned int idx = 0; idx < size; ++idx) {
        printf("%i ", array[idx]);
    }
    printf("\n");
}
// Sum A[0..size-1] in parallel: each thread accumulates a private partial
// sum over a strided slice (id, id+stride, id+2*stride, ...) and folds it
// into the shared total under a critical section.
unsigned int sumArray(unsigned int *A, unsigned int size)
{
    unsigned int gsum = 0;
    unsigned int stride;
    #pragma omp parallel
    {
        // One thread publishes the team size; the implicit barrier at the
        // end of `single` (no nowait) guarantees every thread sees `stride`
        // before reading it below.
        #pragma omp single
        stride = omp_get_num_threads();
        unsigned int id = omp_get_thread_num();
        unsigned int sum = 0;   // thread-private partial sum
        for(unsigned int i=id; i<size; i+=stride)
        {
            sum += A[i];
        }
        // serialized fold of each partial sum into the shared result
        #pragma omp critical
        gsum += sum;
    }
    return gsum;
}
// Driver: fill an array with pseudo-random values and print its sum.
// Fixes: dropped the unused b[N] and c[N] buffers, and print the unsigned
// sum with %u (the original's %i mismatched the unsigned argument).
int main()
{
    unsigned int a[N];
    srand(0);
    initArray(a, N);
    printf("sum(A) = %u\n", sumArray(a, N));
    return 0;
}
|
quick_sort.h | // MIT License
//
// Copyright (c) 2020 xiw
// \author wang xi
// Parallel quick sort using OpenMP.
#ifndef QUICK_SORT_H_
#define QUICK_SORT_H_
#include <vector>
#include <memory>
#include <omp.h>
using std::vector;
namespace para {
// when array size is smaller than MIN_SIZE, no more tasks will be created
const int MIN_SIZE = 100000000;
// In-place partition of arr[0..size-1] around the pivot arr[0], using the
// "hole" technique: the pivot's slot is the initial hole, and elements are
// moved into the hole from alternating ends until the scans meet.
// On return arr[*partition_pos] == pivot; elements left of it are <= pivot,
// elements right of it are >= pivot.
// NOTE(review): `size <= 0` on a size_t only matches size == 0, in which
// case *partition_pos is left untouched — callers must pre-initialize it.
template<typename T>
void Partition(T* arr, size_t size, size_t* partition_pos) {
	if (size <= 0) {
		return;
	}
	// maybe better to randomly choose a pivot
	T pivot_value = arr[0];
	size_t left = 0, right = size;
	while (left < right) {
		--right;
		// scan right-to-left for an element that belongs on the left side
		while (left < right && arr[right] >= pivot_value) {
			--right;
		}
		if (left >= right) {
			break;
		}
		// move it into the hole at `left` (swap half; pivot fills in at the end)
		arr[left] = arr[right];
		++left;
		// scan left-to-right for an element that belongs on the right side
		while (left < right && arr[left] <= pivot_value) {
			++left;
		}
		if (left >= right) {
			break;
		}
		// move it into the hole at `right`
		arr[right] = arr[left];
	}
	// drop the pivot into the final hole
	arr[left] = pivot_value;
	*partition_pos = left;
}
// Recursive quicksort step: partition, then sort both halves.
// For arrays larger than MIN_SIZE the two halves become independent OpenMP
// tasks joined by the enclosing taskgroup; smaller arrays put both recursive
// calls in a single task to avoid task-creation overhead.
template<typename T>
void ParallelSort(T* arr, size_t size) {
	if (size <= 1) {
		return;
	}
	size_t partition_pos = 0;
	Partition(arr, size, &partition_pos);
	if (size > MIN_SIZE) {
		// taskgroup waits for both child tasks (and their descendants)
		#pragma omp taskgroup
		{
			#pragma omp task mergeable untied
			if (partition_pos > 1){
				ParallelSort(arr, partition_pos);
			}
			#pragma omp task mergeable untied
			if (partition_pos + 2 < size) {
				ParallelSort(arr + partition_pos + 1, size - partition_pos - 1);
			}
		}
	} else {
		// below the cutoff: one task handles both halves sequentially
		#pragma omp task mergeable untied
		{
			if (partition_pos > 1){
				ParallelSort(arr, partition_pos);
			}
			if (partition_pos + 2 < size) {
				ParallelSort(arr + partition_pos + 1, size - partition_pos - 1);
			}
		}
	}
}
// \brief Sort the data using parallel version of quicksort.
// Parallel is realized through OpenMP. For parallelism, we
// use omp task clause.
//
// For the 1st version, we only support data type that can use
// < to compare. And the sort is in place.
//
// \param arr the arr to be sorted
// \return void
// \brief Sort `arr` in place with a task-parallel quicksort.
// Spins up an OpenMP team; a single thread seeds the recursion, and the
// task constructs inside ParallelSort fan the work out to the team.
//
// \param arr the array to sort (elements compared with <)
// \param size number of elements
template<typename T>
void ParallelQuickSort(T* arr, size_t size) {
	#pragma omp parallel
	{
		#pragma omp single
		ParallelSort(arr, size);
	}
}
} // namespace para
#endif
|
8214.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Array initialization: fill A, B, C, D with deterministic rational
   patterns so benchmark runs are reproducible and comparable. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nk; j++)
      A[i][j] = ((DATA_TYPE) i*j) / ni;
  for (i = 0; i < nk; i++)
    for (j = 0; j < nj; j++)
      B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
  for (i = 0; i < nj; i++)
    for (j = 0; j < nm; j++)
      C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
  for (i = 0; i < nm; i++)
    for (j = 0; j < nl; j++)
      D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints G to stderr, 20 values per line. */
static
void print_array(int ni, int nl,
		 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nl; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
      if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: G := (A*B) * (C*D), timed including call and
   return.
   Fixes over the original: the pragma carried an autotuner placeholder
   `num_threads(#P11)` (a hard syntax error), and `target teams distribute`
   directives were nested directly inside a host `parallel` region, which is
   non-conforming OpenMP. Rewritten as three standard host `parallel for`
   nests; the implicit barrier at the end of each one orders the E and F
   products before G consumes them. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
		DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
		DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
		DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
		DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
		DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
		DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
		DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;
#pragma scop
  /* E := A*B */
#pragma omp parallel for private(j, k)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NJ; j++)
      {
	E[i][j] = 0;
	for (k = 0; k < _PB_NK; ++k)
	  E[i][j] += A[i][k] * B[k][j];
      }
  /* F := C*D */
#pragma omp parallel for private(j, k)
  for (i = 0; i < _PB_NJ; i++)
    for (j = 0; j < _PB_NL; j++)
      {
	F[i][j] = 0;
	for (k = 0; k < _PB_NM; ++k)
	  F[i][j] += C[i][k] * D[k][j];
      }
  /* G := E*F (E and F are complete here: implicit barriers above) */
#pragma omp parallel for private(j, k)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NL; j++)
      {
	G[i][j] = 0;
	for (k = 0; k < _PB_NJ; ++k)
	  G[i][j] += E[i][k] * F[k][j];
      }
#pragma endscop
}
// Benchmark driver: allocate, initialize, time kernel_3mm, print timing,
// and (optionally) dump G to defeat dead-code elimination.
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
  /* Initialize array(s). E, F, G are outputs and start uninitialized. */
  init_array (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_3mm (ni, nj, nk, nl, nm,
	      POLYBENCH_ARRAY(E),
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(B),
	      POLYBENCH_ARRAY(F),
	      POLYBENCH_ARRAY(C),
	      POLYBENCH_ARRAY(D),
	      POLYBENCH_ARRAY(G));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);
  return 0;
}
|
pr79428-2.c | /* PR c/79428 */
/* { dg-options "-fopenmp" } */
void
foo ()
{
#pragma omp sections
#pragma omp section /* { dg-error "'#pragma omp section' may only be used in '#pragma omp sections' construct|not allowed|expected" } */
|
SparseLinear.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SparseLinear.c"
#else
#ifdef _OPENMP
#include <omp.h>
#endif
#define ROW_PTR2(t, r) (THTensor_(data)(t) + (r) * (t)->stride[0])
#define COL_PTR2(t, c) (THTensor_(data)(t) + (c) * (t)->stride[1])
// Legacy sparse layout: batchsize x nnz x 2 ({index, value} pairs).
static bool THNN_(checkLegacyInput)(THTensor* t)
{
  if (t->nDimension != 3) return false;
  return t->size[2] == 2;
}
// COO sparse layout: nnz x 3 ({batch, index, value} triples).
static bool THNN_(checkInput)(THTensor* t)
{
  if (t->nDimension != 2) return false;
  return t->size[1] == 3;
}
// True iff t is 2-D with shape exactly size0 x size1.
static bool THNN_(checkSize2D)(THTensor* t, long size0, long size1)
{
  if (t->nDimension != 2) return false;
  return t->size[0] == size0 && t->size[1] == size1;
}
// True iff t is 1-D with exactly size0 elements.
static bool THNN_(checkSize1D)(THTensor* t, long size0)
{
  if (t->nDimension != 1) return false;
  return t->size[0] == size0;
}
// Stride-aware scalar write into a 1-D tensor: t[x0] = value.
static void THNN_(set1d)(THTensor *t, long x0, real value) {
  THStorage_(set)(t->storage, t->storageOffset + x0*t->stride[0], value);
}
// Stride-aware scalar read from a 3-D tensor: returns t[x0][x1][x2].
static real THNN_(get3d)(const THTensor *t, long x0, long x1, long x2) {
  return THStorage_(get)(t->storage, t->storageOffset +
                         x0*t->stride[0] + x1*t->stride[1] + x2*t->stride[2]);
}
// Stride-aware scalar read from a 2-D tensor: returns t[x0][x1].
static real THNN_(get2d)(const THTensor *t, long x0, long x1) {
  return THStorage_(get)(t->storage, t->storageOffset +
                         x0*t->stride[0] + x1*t->stride[1]);
}
// Forward pass: output[h] = sum over sparse input entries of batch h of
// val * weight[:, col], plus bias. `input` is COO, nnz x 3 rows of
// {1-based batch index, 1-based column, value}.
// NOTE(review): the first pass derives row pointers from consecutive batch
// indices, so input rows are presumably sorted by batch index — verify at
// call sites.
void THNN_(SparseLinear_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias)
{
  long h, i, j, hp0, hp1;
  long outDim = THTensor_(size)(weight, 0);
  long inDim = THTensor_(size)(weight, 1);
  long batchSize = THTensor_(size)(output, 0);
  THArgCheck(THNN_(checkInput)(input), 2, "input must be in coo format, nnz x 3");
  THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
  long nnz = THTensor_(size)(input, 0);
  // csr[h] = index of the first input row belonging to batch h (CSR-style)
  THLongTensor * csr = THLongTensor_newWithSize1d(batchSize+1);
  THLongTensor_zero(csr);
  weight = THTensor_(newContiguous)(weight);
  //#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
  for (i=0; i<nnz; i++) {
    hp0 = (long)(THNN_(get2d)(input, i, 0)) - 1;
    hp1 = (i+1 == nnz) ?
            batchSize :
            (long)(THNN_(get2d)(input, i+1, 0)) - 1;
    if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
      THLongTensor_set1d(csr, h+1, i+1);
    }
  }
  // output = weight * input + bias
  THTensor_(zero)(output);
  // batches are independent: each thread fills distinct rows of output
  #pragma omp parallel for private(h, i) schedule(static) if (nnz > 10000)
  for (h = 0; h < batchSize; h++) {
    long i_start = THLongTensor_get1d(csr, h);
    long i_end = THLongTensor_get1d(csr, h+1);
    for (i = i_start; i < i_end; i++) {
      real val = THNN_(get2d)(input, i, 2);
      if (val == 0) {
        continue;   // explicit zeros contribute nothing; skip the axpy
      }
      long offset = (long)(THNN_(get2d)(input, i, 1)) - 1;
      if (offset >= 0 && offset < inDim) {
        // output[h] += val * weight[:, offset]
        THBlas_(axpy)(outDim,
            val,
            COL_PTR2(weight, offset), weight->stride[0],
            ROW_PTR2(output, h), output->stride[1]);
      } else {
        THError("index out of bound. updateOutput: %d not between 1 and %d",
                offset + 1, inDim);
      }
    }
  }
  // add bias to every output row
  THTensor* output_row = THTensor_(new)();
  for (h = 0; h < batchSize; h++) {
    THTensor_(select)(output_row, output, 0, h);
    THTensor_(cadd)(output_row, bias, 1.0, output_row);
  }
  THTensor_(free)(output_row);
  THLongTensor_free(csr);
  THTensor_(free)(weight);
}
// Forward pass for the legacy input layout (batchsize x nnz x 2 of
// {1-based column, value}): output[h] = sum val * weight[:, col] + bias.
void THNN_(SparseLinear_legacyUpdateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias)
{
  long h, i;
  long outDim = THTensor_(size)(weight, 0);
  long inDim = THTensor_(size)(weight, 1);
  THArgCheck(THNN_(checkLegacyInput)(input), 2, "input size must be batchsize x nnz x 2");
  THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong");
  weight = THTensor_(newContiguous)(weight);
  long batchSize = THTensor_(size)(input, 0);
  long nnz = THTensor_(size)(input, 1);
  THTensor_(resize2d)(output, batchSize, outDim);
  // output = weight * input + bias
  THTensor_(zero)(output);
  // batches are independent: each thread fills distinct rows of output
  #pragma omp parallel for private(h, i) schedule(static) if ( \
      batchSize > 1 && batchSize * nnz * outDim > 10000)
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      real val = THNN_(get3d)(input, h, i, 1);
      if (val == 0) {
        continue;   // explicit zeros contribute nothing; skip the axpy
      }
      long offset = (long)(THNN_(get3d)(input, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        // output[h] += val * weight[:, offset]
        THBlas_(axpy)(outDim,
            val,
            COL_PTR2(weight, offset), weight->stride[0],
            ROW_PTR2(output, h), output->stride[1]);
      } else {
        THError("index out of bound. updateOutput: %d not between 1 and %d",
                offset + 1, inDim);
      }
    }
  }
  // add bias to every output row
  THTensor* output_row = THTensor_(new)();
  for (h = 0; h < batchSize; h++) {
    THTensor_(select)(output_row, output, 0, h);
    THTensor_(cadd)(output_row, bias, 1.0, output_row);
  }
  THTensor_(free)(output_row);
  THTensor_(free)(weight);
}
// Backward pass (COO input): gradWeight += scale * gradOutput' * input,
// gradBias += scale * sum(gradOutput), plus optional weight decay.
// NOTE(review): the csc build derives column pointers from consecutive
// column indices, so input rows are presumably sorted by column — verify at
// call sites. Parallelizing over columns makes the gradWeight axpy writes
// disjoint per thread.
void THNN_(SparseLinear_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *weight,
          THTensor *bias,
          accreal weightDecay_,
          accreal scale_)
{
  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  long h, i, col, hp0, hp1;
  long outDim = THTensor_(size)(weight, 0);
  long inDim = THTensor_(size)(weight, 1);
  THArgCheck(THNN_(checkInput)(input), 2,
             "input must be in coo format, nnz x 3");
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5,
             "gradBias size wrong");
  THArgCheck(THTensor_(isContiguous)(gradOutput), 1,
             "gradOutput must be contiguous");
  long nnz = THTensor_(size)(input, 0);
  // csc[col] = index of the first input row with that column (CSC-style)
  THLongTensor* csc = THLongTensor_newWithSize1d(inDim+1);
  THLongTensor_zero(csc);
  weight = THTensor_(newContiguous)(weight);
  #pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000)
  for (i = 0; i < nnz; i++) {
    hp0 = (long)(THNN_(get2d)(input, i, 1)) - 1;
    hp1 = (i+1 == nnz) ?
            inDim :
            (long)(THNN_(get2d)(input, i+1, 1)) - 1;
    if (hp0 != hp1) for (h = hp0; h < hp1; h++) {
      THLongTensor_set1d(csc, h+1, i+1);
    }
  }
  // gradWeight += gradOutput * input
  #pragma omp parallel for private(h, i, col) schedule(static) if (nnz > 10000)
  for (col = 0; col < inDim; col++) {
    long i_start = THLongTensor_get1d(csc, col);
    long i_end = THLongTensor_get1d(csc, col+1);
    for (i = i_start; i < i_end; i++) {
      real val = scale * THNN_(get2d)(input, i, 2);
      h = (long)(THNN_(get2d)(input, i, 0)) - 1;
      long offset = (long)(THNN_(get2d)(input, i, 1)) - 1;
      if (offset >= 0 && offset < inDim) {
        // gradWeight[:, offset] += val * gradOutput[h]
        THBlas_(axpy)(outDim,
            val,
            ROW_PTR2(gradOutput, h), gradOutput->stride[1],
            COL_PTR2(gradWeight, offset), gradWeight->stride[0]);
      } else {
        THError(
            "index out of bound. accGradParameters: %d not between 1 and %d",
            offset + 1,
            inDim);
      }
    }
  }
  // gradBias += gradOutput (summed over the batch dimension)
  THTensor* buf = THTensor_(new)();
  THTensor_(sum)(buf, gradOutput, 0, 1);
  THTensor_(cadd)(gradBias, gradBias, scale, buf);
  THTensor_(free)(buf);
  THLongTensor_free(csc);
  if (weightDecay != 0) {
    THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
  }
  THTensor_(free)(weight);
}
// Backward pass for the legacy layout (batchsize x nnz x 2):
// gradWeight += scale * gradOutput' * input, gradBias += scale * gradOutput,
// plus optional weight decay.
// NOTE(review): the omp loop parallelizes over i (the nnz slot), while two
// different (h, i) entries may reference the same column offset; concurrent
// axpy writes to the same gradWeight column would race — presumably inputs
// avoid duplicate offsets per slot; verify before tightening.
void THNN_(SparseLinear_legacyAccGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *weight,
          THTensor *bias,
          accreal weightDecay_,
          accreal scale_)
{
  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  long h, i;
  long outDim = THTensor_(size)(weight, 0);
  long inDim = THTensor_(size)(weight, 1);
  THArgCheck(THNN_(checkLegacyInput)(input), 2,
             "input size must be batchsize x nnz x 2");
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5,
             "gradBias size wrong");
  THArgCheck(THTensor_(isContiguous)(gradOutput), 1,
             "gradOutput must be contiguous");
  long batchSize = THTensor_(size)(input, 0);
  long nnz = THTensor_(size)(input, 1);
  THTensor_(resize2d)(gradOutput, batchSize, outDim);
  // gradWeight += gradOutput * input
  #pragma omp parallel for private(h, i) schedule(static) if (\
      batchSize * nnz * outDim > 10000)
  for (i = 0; i < nnz; i++) {
    for (h = 0; h < batchSize; h++) {
      real val = scale * THNN_(get3d)(input, h, i, 1);
      if (val == 0) {
        continue;   // explicit zeros contribute nothing
      }
      long offset = (long)(THNN_(get3d)(input, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        // gradWeight[:, offset] += val * gradOutput[h]
        THBlas_(axpy)(outDim,
            val,
            ROW_PTR2(gradOutput, h), gradOutput->stride[1],
            COL_PTR2(gradWeight, offset), gradWeight->stride[0]);
      } else {
        THError(
            "index out of bound. accGradParameters: %d not between 1 and %d",
            offset + 1,
            inDim);
      }
    }
  }
  // gradBias += gradOutput, row by row
  THTensor* gradOutput_row = THTensor_(new)();
  for (h = 0; h < batchSize; h++) {
    THTensor_(select)(gradOutput_row, gradOutput, 0, h);
    THTensor_(cadd)(gradBias, gradBias, scale, gradOutput_row);
  }
  THTensor_(free)(gradOutput_row);
  if (weightDecay != 0) {
    THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight);
  }
}
// SGD step restricted to the columns touched by the last sparse input:
// bias -= learningRate * gradBias, and for each unique column offset seen
// in lastInput, weight[:, offset] -= learningRate * gradWeight[:, offset].
// Fixes over the original: the `offsets` tensor was leaked on the cnt == 0
// early return, and the unused local `h` is removed.
void THNN_(SparseLinear_updateParameters)(
          THNNState *state,
          THTensor *weight,
          THTensor *bias,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput,
          accreal learningRate_)
{
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  long i;
  long outDim = weight->size[0];
  long inDim = weight->size[1];
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THNN_(checkInput)(lastInput), 6,
             "input must be in coo format, nnz x 3");
  long nnz = THTensor_(size)(lastInput, 0);
  // collect (0-based) offsets of non-0 values in lastInput
  THTensor* offsets = THTensor_(newWithSize1d)(nnz);
  long cnt = 0;
  for (i = 0; i < nnz; i++) {
    real val = THNN_(get2d)(lastInput, i, 2);
    if (val == 0) {
      continue;
    }
    long offset = (long)(THNN_(get2d)(lastInput, i, 1)) - 1;
    if (offset >= 0 && offset < inDim) {
      THNN_(set1d)(offsets, cnt++, offset);
    } else {
      THError(
          "index out of bound. updateParameters: %d not between 1 and %d",
          offset + 1,
          inDim);
    }
  }
  if (cnt == 0) {
    // nothing to update; free offsets (was leaked by the original)
    THTensor_(free)(offsets);
    return;
  }
  THTensor_(resize1d)(offsets, cnt);
  // sort, then compact in place to the unique offsets
  THTensor* uniqueOffsets = THTensor_(new)();
  THLongTensor* ri = THLongTensor_new();
  THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0);
  THLongTensor_free(ri);
  THTensor_(free)(offsets);
  cnt = 1;
  real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets);
  for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
    if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
      uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
    }
  }
  THTensor_(resize1d)(uniqueOffsets, cnt);
  // weight += -learningRate * gradWeight (touched columns only)
  THTensor_(cadd)(bias, bias, -learningRate, gradBias);
  #pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000)
  for (i = 0; i < cnt; i++) {
    long offset = (long)uniqueOffsets_p[i];
    THBlas_(axpy)(outDim,
        -learningRate,
        COL_PTR2(gradWeight, offset), gradWeight->stride[0],
        COL_PTR2(weight, offset), weight->stride[0]);
  }
  THTensor_(free)(uniqueOffsets);
}
// Legacy-layout SGD step: same as SparseLinear_updateParameters but for
// batchsize x nnz x 2 input.
// Fixes over the original: when every stored value is zero (cnt == 0) the
// original fell through, forced cnt = 1, grew the empty uniqueOffsets
// tensor, and then indexed weight with an uninitialized offset (out-of-
// bounds access). Now it frees the temporaries and returns early, matching
// the non-legacy variant's behavior.
void THNN_(SparseLinear_legacyUpdateParameters)(
          THNNState *state,
          THTensor *weight,
          THTensor *bias,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput,
          accreal learningRate_)
{
  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
  long h, i;
  long outDim = weight->size[0];
  long inDim = weight->size[1];
  THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4,
             "gradWeight size wrong");
  THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong");
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong");
  THArgCheck(THNN_(checkLegacyInput)(lastInput), 6,
             "input size must be batchsize x nnz x 2");
  long batchSize = THTensor_(size)(lastInput, 0);
  long nnz = THTensor_(size)(lastInput, 1);
  // collect (0-based) offsets of non-0 values in lastInput
  THTensor* offsets = THTensor_(newWithSize1d)(batchSize * nnz);
  long cnt = 0;
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      real val = THNN_(get3d)(lastInput, h, i, 1);
      if (val == 0 ) {
        continue;
      }
      long offset = (long)(THNN_(get3d)(lastInput, h, i, 0)) - 1;
      if (offset >= 0 && offset < inDim) {
        THNN_(set1d)(offsets, cnt++, offset);
      } else {
        THError(
            "index out of bound. updateParameters: %d not between 1 and %d",
            offset + 1,
            inDim);
      }
    }
  }
  if (cnt == 0) {
    // no touched columns: avoid reading an uninitialized offset below
    THTensor_(free)(offsets);
    return;
  }
  THTensor_(resize1d)(offsets, cnt);
  // sort, then compact in place to the unique offsets
  THTensor* uniqueOffsets = THTensor_(new)();
  THLongTensor* ri = THLongTensor_new();
  THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0);
  THLongTensor_free(ri);
  THTensor_(free)(offsets);
  cnt = 1;
  real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets);
  for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) {
    if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) {
      uniqueOffsets_p[cnt++] = uniqueOffsets_p[i];
    }
  }
  THTensor_(resize1d)(uniqueOffsets, cnt);
  // weight += -learningRate * gradWeight (touched columns only)
  THTensor_(cadd)(bias, bias, -learningRate, gradBias);
  #pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000)
  for (i = 0; i < cnt; i++) {
    long offset = (long)uniqueOffsets_p[i];
    THBlas_(axpy)(outDim,
        -learningRate,
        COL_PTR2(gradWeight, offset), gradWeight->stride[0],
        COL_PTR2(weight, offset), weight->stride[0]);
  }
  THTensor_(free)(uniqueOffsets);
}
// Zero gradBias entirely, and zero only the gradWeight columns touched by
// the last sparse (COO) input — cheaper than clearing the whole matrix.
void THNN_(SparseLinear_zeroGradParameters)(
          THNNState *state,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput)
{
  long h, i, j;
  long outDim = gradWeight->size[0];
  long inDim = gradWeight->size[1];
  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong");
  THArgCheck(THNN_(checkInput)(lastInput), 4,
             "input must be in coo format, nnz x 3");
  THTensor_(zero)(gradBias);
  long nnz = THTensor_(size)(lastInput, 0);
  #pragma omp parallel for private(i, j) schedule(static) if ( \
      nnz * outDim > 10000)
  for (i = 0; i < nnz; i++) {
    if (THNN_(get2d)(lastInput, i, 2) == 0 ) {
      continue;   // zero-valued entry never contributed a gradient
    }
    long offset = (long)(THNN_(get2d)(lastInput, i, 1)) - 1;
    if (offset >= 0 && offset < inDim) {
      real* pGradWeight = COL_PTR2(gradWeight, offset);
      if (gradWeight->stride[0] == 1) {
        // contiguous column: bulk fill
        THVector_(fill)(pGradWeight, 0, outDim);
      } else {
        // strided column: clear element by element
        long stride = gradWeight->stride[0];
        for (j = 0; j < outDim; ++j) {
          pGradWeight[j * stride] = 0;
        }
      }
    } else {
      THError(
          "index out of bound. zeroGradParameters: %d not between 1 and %d",
          offset + 1,
          inDim);
    }
  }
}
/* Legacy-layout variant of zeroGradParameters. lastInput has shape
 * batchSize x nnz x 2: [index, value] per entry, 1-based indices. Clears
 * only the gradWeight columns referenced by lastInput and zeroes gradBias
 * in full. Raises via THError when an index is out of [1, inDim]. */
void THNN_(SparseLinear_legacyZeroGradParameters)(
          THNNState *state,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *lastInput)
{
  long h, i, j;
  long outDim = gradWeight->size[0];
  long inDim = gradWeight->size[1];

  THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong");
  THArgCheck(THNN_(checkLegacyInput)(lastInput), 4,
             "input size must be batchsize x nnz x 2");

  THTensor_(zero)(gradBias);

  long batchSize = THTensor_(size)(lastInput, 0);
  long nnz = THTensor_(size)(lastInput, 1);
#pragma omp parallel for private(h, i, j) schedule(static) if ( \
  batchSize > 1 && batchSize * nnz * outDim > 10000)
  for (h = 0; h < batchSize; h++) {
    for (i = 0; i < nnz; i++) {
      /* Entries with an explicit zero value contributed nothing; skip. */
      if (THNN_(get3d)(lastInput, h, i, 1) == 0 ) {
        continue;
      }

      long offset = (long)(THNN_(get3d)(lastInput, h, i, 0)) - 1; /* 1-based -> 0-based */
      if (offset >= 0 && offset < inDim) {
        real* pGradWeight = COL_PTR2(gradWeight, offset);
        if (gradWeight->stride[0] == 1) {
          THVector_(fill)(pGradWeight, 0, outDim);
        } else {
          long stride = gradWeight->stride[0];
          for (j = 0; j < outDim; ++j) {
            pGradWeight[j * stride] = 0;
          }
        }
      } else {
        /* %ld matches the long arguments; the original used %d, which is
         * undefined behavior for long on LP64 platforms. */
        THError(
            "index out of bound. zeroGradParameters: %ld not between 1 and %ld",
            offset + 1,
            inDim);
      }
    }
  }
}
#undef ROW_PTR2
#undef COL_PTR2
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <omp.h>
#include <cilk/cilk.h>
// maximum value of n
#define NMAX 75000000
//#define NMAX 200
#define CHUNKSIZE 20
/* Per-element partition counters. After the prefix sum in
 * parallelPrefixSum, .lt/.gt hold each element's rank among the elements
 * smaller / not-smaller than the pivot. */
typedef struct{
int lt;
int gt;
} flag;
flag *flags;    /* scratch counters, allocated per sort in parallelQuickSort */
double *local;  /* snapshot of the array used by parallelPartition's scatter */
int size;       /* current problem size; psqHelper uses it to pick a partitioner */
// void printArray(int n){
// int j;
// printf("[");
// int t =0;
// for(j = 0; j<n; j++){
// //if(j<10){
// if(t){
// printf(", %f", N[j]);
// }else{
// t=1;
// printf("%f", N[j]);
// }
// //}
// }
// printf("]\n");
// }
/* Return a pseudo-random double uniformly distributed in [low, high],
 * derived from rand() scaled by RAND_MAX. */
double drand ( double low, double high )
{
    double span = high - low;
    return ((double)rand() * span) / (double)RAND_MAX + low;
}
/* Fill arr[0..n-1] with uniform random doubles in [0, 1000], in parallel. */
void fillArrayRandom(double *arr, int n){
    int idx;
    cilk_for(idx = 0; idx < n; idx++){
        arr[idx] = drand(0, 1000);
    }
}
/* qsort-style comparator for doubles: ascending order. */
int cmpfunc (const void * a, const void * b)
{
    double x = *(double*)a;
    double y = *(double*)b;
    if (x < y)
        return -1;
    if (x > y)
        return 1;
    return 0;
}
/* Exchange the doubles pointed to by xp and yp. */
void swap(double *xp, double *yp){
    double held = *xp;
    *xp = *yp;
    *yp = held;
}
// Merges two subarrays of arr[].
// First subarray is arr[l..m]
// Second subarray is arr[m+1..r]
// void merge(int l, int m, int r){
// int i, j, k;
// int n1 = m - l + 1;
// int n2 = r - m;
// /* create temp arrays */
// int L[n1], R[n2];
// /* Copy data to temp arrays L[] and R[] */
// for (i = 0; i < n1; i++)
// L[i] = N[l + i];
// for (j = 0; j < n2; j++)
// R[j] = N[m + 1+ j];
// /* Merge the temp arrays back into arr[l..r]*/
// i = 0; // Initial index of first subarray
// j = 0; // Initial index of second subarray
// k = l; // Initial index of merged subarray
// while (i < n1 && j < n2){
// if (L[i] <= R[j]){
// N[k] = L[i];
// i++;
// }else{
// N[k] = R[j];
// j++;
// }
// k++;
// }
// /* Copy the remaining elements of L[], if there
// are any */
// while (i < n1){
// N[k] = L[i];
// i++;
// k++;
// }
// /* Copy the remaining elements of R[], if there
// are any */
// while (j < n2){
// N[k] = R[j];
// j++;
// k++;
// }
// }
/* l is for left index and r is right index of the
sub-array of arr to be sorted */
// void mergeSortHelper(int l, int r){
// if (l < r){
// int m = l+(r-l)/2;
// // Sort first and second halves
// mergeSortHelper(l, m);
// mergeSortHelper(m+1, r);
// merge(l, m, r);
// }
// }
// double mergeSort(int n){
// double t1, t2;
// #pragma omp master
// t1 = omp_get_wtime();
// mergeSortHelper(0,n-1);
// #pragma omp master
// t2 = omp_get_wtime();
// return t2-t1;
// }
// double insertionSort(int n){
// int key, j, i;
// double t1, t2;
// #pragma omp master
// t1 = omp_get_wtime();
// for (i = 1; i < n; i++){
// key = N[i];
// j = i-1;
// while (j >= 0 && N[j] > key){
// N[j+1] = N[j];
// j--;
// }
// N[j+1] = key;
// }
// #pragma omp master
// t2 = omp_get_wtime();
// return t2-t1;
// }
/* Lomuto partition of arr[p..r] around the pivot arr[r]. On return the
 * pivot sits at its final sorted position (the return value); everything
 * left of it is <= pivot and everything right of it is > pivot. */
int partition(double *arr, int p, int r){
    double pivot = arr[r];
    int boundary = p - 1;   /* last index of the <= pivot region */
    int scan;
    for (scan = p; scan < r; scan++){
        if (arr[scan] <= pivot){
            boundary++;
            double held = arr[boundary];
            arr[boundary] = arr[scan];
            arr[scan] = held;
        }
    }
    /* Drop the pivot just past the <= region. */
    double held = arr[boundary + 1];
    arr[boundary + 1] = arr[r];
    arr[r] = held;
    return boundary + 1;
}
// void quickSortHelper(int p, int r){
// if(p<r){
// int q=partition(p,r);
// #pragma omp task
// {
// quickSortHelper(p,q-1);
// }
// quickSortHelper(q+1,r);
// }
// }
// double sequentialQuickSort(int n){
// double t1, t2;
// #pragma omp master
// t1 = omp_get_wtime();
// #pragma omp parallel
// {
// We only want our master thread to be executed once, thus we use the singel construct here.
// nowait is used becuse we have no need for synchronization at the end of the region
// #pragma omp single nowait
// {
// quickSortHelper(0, n-1);
// }
// }
// #pragma omp master
// t2 = omp_get_wtime();
// return t2-t1;
// }
/* In-place insertion sort of the inclusive range arr[p..r]. */
void insertionSortHelper(double *arr, int p, int r){
    int pos;
    for (pos = p + 1; pos <= r; pos++){
        double value = arr[pos];
        int slot = pos - 1;
        /* Shift larger elements right until value's slot is found. */
        while (slot >= p && arr[slot] > value){
            arr[slot + 1] = arr[slot];
            slot--;
        }
        arr[slot + 1] = value;
    }
}
/* Sequential in-place inclusive prefix sum over arr[p..r]. */
void prefixSum(int arr[], int p, int r){
    int k;
    for (k = p + 1; k <= r; k++){
        arr[k] += arr[k - 1];
    }
}
/* Floor of the base-2 logarithm of n; returns 0 for n <= 1. */
int log_2(int n){
    int bits = 0;
    for (n >>= 1; n != 0; n >>= 1){
        bits++;
    }
    return bits;
}
/* Two-pass (up-sweep / down-sweep) parallel inclusive prefix sum over the
 * global flags[p..r] array, accumulating the .lt and .gt counters in
 * place. Used by parallelPartition to turn per-element marks into stable
 * destination ranks.
 * NOTE(review): the tree passes assume len behaves like a power of two;
 * for other lengths trailing elements may not receive the full running
 * total — confirm against parallelPartition's usage. */
void parallelPrefixSum(int p, int r){
int len = r-p+1;
int shift, j, h;
int k = log_2(len);
/* Up-sweep: at level h each block of 2^h elements adds its left half's
 * total into its last slot. */
for(h=1; h<k+1;h++){
shift = 1<<h;
// #pragma omp parallel for schedule(static) private(j)
cilk_for (j=1; j<(len/shift)+1;j++){
flags[p+j*shift-1].lt+=flags[p+j*shift-(shift/2)-1].lt;
flags[p+j*shift-1].gt+=flags[p+j*shift-(shift/2)-1].gt;
}
}
/* Down-sweep: odd-numbered blocks (j % 2 == 1, so j >= 3) pick up the
 * running total from the preceding block, completing the scan. */
for(h=k; h>-1;h--){
shift = 1<<h;
// #pragma omp parallel for schedule(static) private(j)
cilk_for (j=2; j<(len/shift)+1;j++){
if(j%2==1){
flags[p+j*shift-1].lt+=flags[p+j*shift-shift-1].lt;
flags[p+j*shift-1].gt+=flags[p+j*shift-shift-1].gt;
}
}
}
}
/* Parallel partition of arr[p..r] around the pivot arr[r], using the
 * global scratch arrays `flags` (per-element lt/gt marks) and `local`
 * (a snapshot of arr). Returns the pivot's final index.
 * Scheme: mark each element as < pivot or >= pivot, prefix-sum the marks
 * to obtain stable destination ranks, then scatter from the snapshot. */
int parallelPartition(double *arr, int p, int r){
double key=arr[r];
int i,j;
double temp;
/* Reset counters and snapshot the range so the scatter below can read
 * original values while arr is being overwritten. */
cilk_for (i=p; i<r+1; i++){
flags[i].lt=0;
flags[i].gt=0;
local[i]=arr[i];
}
/* Mark every element except the pivot itself (index r). */
cilk_for (i = p; i <r; i++){
if(arr[i]<key){
flags[i].lt=1;
flags[i].gt=0;
}else{
flags[i].lt=0;
flags[i].gt=1;
}
}
/* Inclusive prefix sums of the marks give 1-based ranks within each
 * class (< pivot and >= pivot). */
parallelPrefixSum(p,r);
/* flags[r].lt now holds the total count of elements below the pivot,
 * i.e. the pivot's final offset within [p, r]. */
int pivot = flags[r].lt;
arr[pivot+p]=key;
/* Scatter each element to its rank: smaller values before the pivot,
 * the rest after it. */
cilk_for (i=p; i<r; i++){
if(local[i]<key){
int index = p+flags[i].lt-1;
arr[index]=local[i];
}else{
int index = p+pivot+flags[i].gt;
arr[index]=local[i];
}
}
return pivot+p;
}
/* Recursive hybrid quicksort of arr[p..r]:
 * - tiny ranges (<= 50 elements) fall back to insertion sort;
 * - ranges smaller than half the global problem `size` use the
 *   sequential Lomuto partition;
 * - large ranges use the parallel partition (global scratch arrays).
 * The left half is sorted in a spawned Cilk task; the implicit sync at
 * function return joins it before the caller continues. */
void psqHelper(double *arr, int p, int r){
int q;
if(p<r){
if(r-p<=50){
insertionSortHelper(arr, p,r);
}else{
if(r-p < 0.5*size){
q = partition(arr,p,r);
}else{
q=parallelPartition(arr,p,r);
}
cilk_spawn psqHelper(arr,p,q-1);
psqHelper(arr,q+1,r);
}
}
}
/* Sort arr[0..n-1] with the hybrid parallel quicksort and return the
 * elapsed wall-clock time in seconds (via omp_get_wtime). Allocates the
 * global scratch arrays `flags` and `local` used by parallelPartition and
 * releases them before returning — the original leaked both on every
 * call, which matters because main() invokes this in a loop. Aborts with
 * a diagnostic if allocation fails (previously an unchecked NULL deref). */
double parallelQuickSort(double *arr, int n){
    double t1, t2;
    int i;

    flags = malloc(sizeof(flag) * n);
    local = malloc(sizeof(double) * n);
    if (flags == NULL || local == NULL){
        fprintf(stderr, "parallelQuickSort: out of memory (n=%d)\n", n);
        free(flags);   /* free(NULL) is a no-op */
        free(local);
        exit(EXIT_FAILURE);
    }

    /* Zero the scratch arrays in parallel. */
    cilk_for(i = 0; i < n; i++){
        local[i] = 0;
        flags[i].lt = 0;
        flags[i].gt = 0;
    }

    t1 = omp_get_wtime();
    psqHelper(arr, 0, n - 1);
    t2 = omp_get_wtime();

    /* Release scratch space so repeated calls do not leak. */
    free(flags);
    free(local);
    flags = NULL;
    local = NULL;

    return t2 - t1;
}
// double selectionSort(int n){
// int j, min_idx,i;
// double t1,t2;
// #pragma omp master
// t1 = omp_get_wtime();
// // One by one move boundary of unsorted subarray
// for (i = 0; i < n-1; i++){
// // Find the minimum element in unsorted array
// min_idx = i;
// for (j = i+1; j < n; j++){
// if (N[j] < N[min_idx]){
// min_idx = j;
// }
// }
// double temp = N[i];
// N[i] = N[min_idx];
// N[min_idx]=temp;
// }
// #pragma omp master
// t2 = omp_get_wtime();
// return t2-t1;
// }
/* Return 0 if arr[0..n-1] is in non-decreasing order, -1 otherwise. */
int checkArray(double *arr, int n){
    int k;
    for (k = 1; k < n; k++){
        if (arr[k - 1] > arr[k]){
            return -1;
        }
    }
    return 0;
}
// void tester(int n){
// srand(getpid());
// fillArrayRandom(n);
// printArray(n);
// double t = parallelQuickSort(n);
// printArray(n);
// }
/* Benchmark driver: for each size in n[], fill an array with random
 * doubles, sort it with parallelQuickSort, print the elapsed time, and
 * verify the result. Fixes vs. the original: arr is freed each iteration
 * (it leaked up to 75M doubles per pass), fopen/malloc results are
 * checked, and main returns an explicit status. The csv file is opened
 * but never written — kept for compatibility with the original behavior. */
int main(int argc, char * argv[]){
    FILE* fp = fopen("simTimes.csv", "w+");
    if (fp == NULL){
        perror("simTimes.csv");
        return EXIT_FAILURE;
    }
    int len = 15;
    //int len=5;
    int n[] = {10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000,
               20000, 200000, 2000000, 20000000, 75000000};
    int i;
    srand(getpid());
    for (i = 0; i < len; i++){
        size = n[i];   /* global threshold used by psqHelper */
        double *arr = malloc(sizeof(double) * n[i]);
        if (arr == NULL){
            fprintf(stderr, "out of memory for n=%d\n", n[i]);
            break;
        }
        fillArrayRandom(arr, n[i]);
        double t = parallelQuickSort(arr, n[i]);
        printf("%d elements sorted in %f time\n", n[i], t);
        if (checkArray(arr, n[i]) == -1){
            printf("SORT FAILED\n");
        } else {
            printf("SUCCESSFUL SORT\n");
        }
        free(arr);   /* the original leaked every iteration */
    }
    fclose(fp);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.