source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
testCentralMomentsNormDouble.c | #include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#ifdef USE_POLAR
void polar2x64( int64_t *state, const double mu, const double sigma, double *res );
#else
void wichura2x64( int64_t *state, const double mu, const double sigma, double *res );
#endif
// parameters
// vector width of the SIMD unit expressed in doubles
// (assumes VECTOR_WIDTH is the register width in bits and a double is
// 8 bits wide per unit used here -- TODO confirm the unit of VECTOR_WIDTH)
const int vec_width_in_doubles = VECTOR_WIDTH / 8;
// total number of random numbers to generate
// needs to be divisible by vec_width_in_doubles
const uint64_t number_random_numbers = 1000ll * 1000ll * 1000ll;
// chunk size under which the divide-and-conquer recursion stops and the
// moments are computed directly
const uint64_t recursion_threshold = 1000ll * 1000ll;
// derived parameters
// number of RNG calls needed to fill one SIMD register: each call to
// polar2x64/wichura2x64 produces two doubles (written to &r[j*2])
const int rng_calls_per_loop = vec_width_in_doubles / 2;
// total number of SIMD-width batches over the whole run
const int loop_iterations = number_random_numbers / vec_width_in_doubles;
// compute the non-scaled central moments of local_number_random_numbers
// normal random numbers
// Compute the non-scaled even central moments (orders 2, 4, ..., 20) of
// local_number_random_numbers normal random numbers drawn from the
// configured generator with parameters (mu, sigma).
// The ten raw sums are written to centralMoments[0..9]; the caller is
// responsible for normalizing by the sample count.
void computeCentralMoments( int64_t *state,
                            const int64_t local_number_random_numbers,
                            const double mu,
                            const double sigma,
                            double *centralMoments )
{
    // one SIMD register worth of fresh random numbers
    double r[ vec_width_in_doubles ];
    // r^2 per lane -- every higher moment is a power of r^2
    double r2[ vec_width_in_doubles ];
    // running power of r^2 per lane
    double intermediate[ vec_width_in_doubles ];
    // per-lane accumulators, one array per even moment
    double cm02[ vec_width_in_doubles ];
    double cm04[ vec_width_in_doubles ];
    double cm06[ vec_width_in_doubles ];
    double cm08[ vec_width_in_doubles ];
    double cm10[ vec_width_in_doubles ];
    double cm12[ vec_width_in_doubles ];
    double cm14[ vec_width_in_doubles ];
    double cm16[ vec_width_in_doubles ];
    double cm18[ vec_width_in_doubles ];
    double cm20[ vec_width_in_doubles ];
    // iteration variables; i is signed to match the loop bound's type
    // (the original uint64_t i forced a signed/unsigned comparison)
    int64_t i;
    int j;
    // initialize all lane accumulators to 0
    for( j = 0; j < vec_width_in_doubles; j++ )
    {
        cm02[ j ] = 0.;
        cm04[ j ] = 0.;
        cm06[ j ] = 0.;
        cm08[ j ] = 0.;
        cm10[ j ] = 0.;
        cm12[ j ] = 0.;
        cm14[ j ] = 0.;
        cm16[ j ] = 0.;
        cm18[ j ] = 0.;
        cm20[ j ] = 0.;
    }
    // accumulate moments, one SIMD-width batch per iteration
    for( i = 0; i < local_number_random_numbers; i += vec_width_in_doubles )
    {
        // fill r with random numbers (each call yields two doubles)
        for( j = 0; j < rng_calls_per_loop; j++ )
        {
#ifdef USE_POLAR
            polar2x64( state, mu, sigma, &r[ j * 2 ] );
#else
            wichura2x64( state, mu, sigma, &r[ j * 2 ] );
#endif
        }
        // lane-independent accumulation: each j touches only lane j,
        // so this loop is safe to vectorize
        #pragma omp simd
        for( j = 0; j < vec_width_in_doubles; j++ )
        {
            // center the sample; subsequent powers are of (r - mu)
            r[ j ] = r[ j ] - mu;
            r2[ j ] = r[ j ] * r[ j ];
            intermediate[ j ] = r2[ j ];
            cm02[ j ] += r2[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm04[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm06[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm08[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm10[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm12[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm14[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm16[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm18[ j ] += intermediate[ j ];
            intermediate[ j ] = intermediate[ j ] * r2[ j ];
            cm20[ j ] += intermediate[ j ];
        }
    }
    // zero the output before the horizontal reduction
    for( j = 0; j < 10; j++ )
    {
        centralMoments[ j ] = 0.;
    }
    // Horizontal reduction across lanes. Every iteration updates the
    // same ten scalars -- a loop-carried dependence -- so the
    // "#pragma omp simd" the original placed here (without reduction
    // clauses) was invalid and could let the compiler produce wrong
    // sums. The loop is deliberately left scalar; its trip count is
    // only vec_width_in_doubles.
    for( j = 0; j < vec_width_in_doubles; j++ )
    {
        centralMoments[ 0 ] += cm02[ j ];
        centralMoments[ 1 ] += cm04[ j ];
        centralMoments[ 2 ] += cm06[ j ];
        centralMoments[ 3 ] += cm08[ j ];
        centralMoments[ 4 ] += cm10[ j ];
        centralMoments[ 5 ] += cm12[ j ];
        centralMoments[ 6 ] += cm14[ j ];
        centralMoments[ 7 ] += cm16[ j ];
        centralMoments[ 8 ] += cm18[ j ];
        centralMoments[ 9 ] += cm20[ j ];
    }
}
// Divide-and-conquer driver: split the workload in two until a chunk is
// at most recursion_threshold numbers, compute the moments of each half,
// and sum the partial results into centralMoments[0..9].
void recursion( int64_t *state,
                const int64_t local_number_random_numbers,
                const double mu,
                const double sigma,
                double *centralMoments )
{
    // small enough? then compute directly
    if( local_number_random_numbers <= (int64_t) recursion_threshold )
    {
        computeCentralMoments( state, local_number_random_numbers, mu, sigma, centralMoments );
    }
    else
    {
        double centralMomentsLeft[ 10 ];
        double centralMomentsRight[ 10 ];
        // Split so the two halves cover the whole count: the original
        // gave n/2 to BOTH branches, silently dropping one sample at
        // every level where the count is odd.
        const int64_t left_count = local_number_random_numbers / 2ll;
        const int64_t right_count = local_number_random_numbers - left_count;
        recursion( state, left_count, mu, sigma, centralMomentsLeft );
        recursion( state, right_count, mu, sigma, centralMomentsRight );
        // combine the raw sums of both branches
        int j;
        for( j = 0; j < 10; j++ )
        {
            centralMoments[ j ] = centralMomentsLeft[ j ] + centralMomentsRight[ j ];
        }
    }
    return;
}
// Test driver: generate number_random_numbers N(0,1) samples, compute
// their even central moments 2..20, and compare each against the exact
// value (2j+1)!! with an order-dependent tolerance. Exits 1 on failure.
int main()
{
    // state for the RNG, seeded with zeros
    int64_t state[ 4 ] = { 0, 0, 0, 0 };
    // parameters of the normal distribution
    const double mu = 0.;
    const double sigma = 1.;
    // double factorial (2j+1)!! -- the exact central moment of N(0,1)
    long double_fac = 1;
    // timing
    double startTime, stopTime;
    // final central moments
    double final_cm[ 10 ];
    // relative error of the current moment
    double relative_error;
    // iteration variable
    int j;
    printf( "Run test testCentralMomentsNormDouble\n\n" );
    startTime = omp_get_wtime();
    // compute central moments
    recursion( state, number_random_numbers, mu, sigma, final_cm );
    stopTime = omp_get_wtime();
    printf( "Timing: %es\n\n", stopTime - startTime );
    // check results against the exact moments
    for( j = 0; j < 10; j++ )
    {
        double_fac = double_fac * ( 2 * j + 1 );
        final_cm[ j ] = final_cm[ j ] / number_random_numbers;
        relative_error = fabs( final_cm[ j ] - (double)double_fac ) / (double)double_fac;
        // statistical tolerance grows with the moment order; the three
        // duplicated branch bodies of the original are collapsed here
        // and the already-computed relative_error is printed instead of
        // recomputing the same expression in the printf call
        const double tolerance = ( j < 4 ) ? 1e-3 : ( j < 8 ) ? 1e-2 : 2e-2;
        if( relative_error > tolerance )
        {
            printf( "Error!\n" );
            printf( "%02dth numerical central moment: %e, exact central moment: %9ld, relative error: %e\n",
                    j * 2 + 2, final_cm[ j ], double_fac, relative_error );
            exit( 1 );
        }
    }
    return 0;
}
|
ej2.c | #include <stdio.h>
#include <math.h>
#include <float.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "ctimer.h"
// Sum a vector of M random values sequentially and in parallel with an
// OpenMP reduction, timing both versions with ctimer. (Despite the
// banner text, no matrix-vector product is performed -- only the sum.)
// Fixes vs. the original: explicit int return type for main (implicit
// int is invalid since C99), integer thread count, removal of the unused
// tb/tid locals, and freeing b before exit.
int main(int argc, char **argv)
{
    double t1, t2, tucpu, tscpu;   // wall / user-cpu / sys-cpu timestamps
    const long int M = 1048576;    // vector length
    const int TALLA = 8;           // OpenMP thread count (was a double)
    double *b = malloc(M * sizeof(double));
    if (b == NULL) {
        fprintf(stderr, "malloc failed\n");
        return EXIT_FAILURE;
    }
    srand(time(0));                // requires <time.h>
    printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n");
    printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n");
    printf("Programa que calcula el Producto Matriz-Vector. \n");
    printf("------- \n");
    long int i;
    // data generation
    for (i = 0; i < M; i++) {
        b[i] = rand();
    }
    // sequential sum
    printf("Voy a empezar la suma secuencial. \n");
    printf(" ------- \n");
    double alfa;
    ctimer(&t1, &tucpu, &tscpu);
    alfa = 0.0;
    for (i = 0; i < M; i++) {
        alfa += b[i];
    }
    ctimer(&t2, &tucpu, &tscpu);
    printf("Suma = %f \n", alfa);
    printf(" ------- \n");
    printf("Tiempo %f segundos \n", (float)(t2 - t1));
    printf(" ------- \n");
    // parallel sum
    printf("Empiezo la suma paralela\n");
    printf(" ------- \n");
    ctimer(&t1, &tucpu, &tscpu);
    omp_set_num_threads(TALLA);
    double sol = 0.0;
    // i is private to each thread; sol is combined with a + reduction
    #pragma omp parallel for reduction(+:sol) private(i)
    for (i = 0; i < M; i++) {
        sol += b[i];
    }
    ctimer(&t2, &tucpu, &tscpu);
    // result output
    printf("Ha terminado la suma paralela\n");
    printf(" ------- \n");
    printf("Suma = %f \n", sol);
    printf(" ------- \n");
    printf("Tiempo %f segundos \n", (float)(t2 - t1));
    printf("He acabado. \n");
    printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n");
    printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n");
    free(b);   // the original leaked the vector
    return 0;
}
hw2b.c | #ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define PNG_NO_SETJMP
#include <sched.h>
#include <assert.h>
#include <png.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <mpi.h>
#include <pthread.h>
/* Render a per-pixel iteration-count buffer as an 8-bit RGB PNG.
 * buffer holds one count per pixel with row 0 at the BOTTOM of the
 * image (rows are emitted top-down from buffer[(height-1-y)*width]).
 * Pixels that exhausted the iteration budget (p == iters) stay black;
 * all others get a 16-step color ramp derived from p. */
void write_png(const char* filename, int iters, int width, int height, const int* buffer) {
FILE* fp = fopen(filename, "wb");
assert(fp);
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
assert(png_ptr);
png_infop info_ptr = png_create_info_struct(png_ptr);
assert(info_ptr);
png_init_io(png_ptr, fp);
/* 8-bit RGB, no interlacing */
png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
/* fastest zlib level: favors write speed over file size */
png_set_compression_level(png_ptr, 1);
size_t row_size = 3 * width * sizeof(png_byte);
png_bytep row = (png_bytep)malloc(row_size);
for (int y = 0; y < height; ++y) {
/* start from black; only escaped pixels are colored below */
memset(row, 0, row_size);
for (int x = 0; x < width; ++x) {
/* flip vertically: buffer row 0 is the bottom image row */
int p = buffer[(height - 1 - y) * width + x];
png_bytep color = row + x * 3;
if (p != iters) {
/* bit 4 of p selects one of two ramps; p % 16 * 16 maps the
 * low 4 bits onto 0..240 */
if (p & 16) {
color[0] = 240;
color[1] = color[2] = p % 16 * 16;
} else {
color[0] = p % 16 * 16;
}
}
}
png_write_row(png_ptr, row);
}
free(row);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
/* Hybrid MPI+OpenMP Mandelbrot renderer.
 * Usage: <out.png> <iters> <left> <right> <lower> <upper> <width> <height>
 * Rows are distributed cyclically over MPI ranks and the partial images
 * are combined with an MPI_SUM reduction on rank 0, which writes the PNG. */
int main(int argc, char** argv) {
    int rank, size;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    /* argument parsing */
    assert(argc == 9);
    const char* filename = argv[1];
    int iters = strtol(argv[2], 0, 10);
    double left = strtod(argv[3], 0);
    double right = strtod(argv[4], 0);
    double lower = strtod(argv[5], 0);
    double upper = strtod(argv[6], 0);
    int width = strtol(argv[7], 0, 10);
    int height = strtol(argv[8], 0, 10);
    /* Each rank fills only rows j == rank (mod size) and the final image
     * is assembled with MPI_SUM, so the rows a rank does NOT own must be
     * zero. The original malloc left garbage in them, corrupting the
     * reduced image -- calloc is required here. */
    int* image = (int*)calloc((size_t)width * height, sizeof(int));
    int* result = (int*)calloc((size_t)width * height, sizeof(int));
    assert(image && result);
    /* mandelbrot set: iteration counts vary wildly across rows, hence
     * dynamic scheduling of the OpenMP threads */
    #pragma omp parallel for schedule(dynamic)
    for (int j = rank; j < height; j += size) {
        double y0 = j * ((upper - lower) / height) + lower;
        for (int i = 0; i < width; ++i) {
            double x0 = i * ((right - left) / width) + left;
            int repeats = 0;
            double x = 0;
            double y = 0;
            double length_squared = 0;
            /* escape-time iteration: z <- z^2 + c until |z|^2 >= 4 */
            while (repeats < iters && length_squared < 4) {
                double temp = x * x - y * y + x0;
                y = 2 * x * y + y0;
                x = temp;
                length_squared = x * x + y * y;
                ++repeats;
            }
            image[j * width + i] = repeats;
        }
    }
    MPI_Reduce(image, result, width * height, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        /* draw the assembled image */
        write_png(filename, iters, width, height, result);
    }
    /* every rank owns both buffers; the original freed image only on
     * rank 0 and never freed result */
    free(image);
    free(result);
    MPI_Finalize();
    return 0;
}
|
simpar-omp.c | /**
* @file simpar.c
* @authors: Filipe Marques, Luís Fonseca
* @date 29 Abr 2019
* @brief Header Parallellized implementation of simpar.h containing the particle simulation functions's source and main.
*/
#include "simpar.h"
#include "omp.h"
cell_t ** dummy;
// Print the command-line usage summary and terminate with status 1.
void usg_err()
{
    // the first line was missing its trailing newline, gluing it to the
    // next line of output
    printf("\t[-] usage : ./simpar <seed> <ncside> <n_par> <n_step>\n");
    printf("\t\t[-] int <seed> : seed for random number generation.\n");
    printf("\t\t[-] int <ncside> : size of the grid (number of cells on the side.\n");
    printf("\t\t[-] int <n_par> : number of particles\n");
    // this line described <n_step> but was labelled <n_par>
    printf("\t\t[-] int <n_step> : number of time-steps\n");
    exit(1);
}
// Parse a strictly positive integer from *arg*.
// Returns the parsed value, or 0 on any error (no digits, trailing
// garbage, or a non-positive value), printing a diagnostic in each case.
// Fixes vs. the original: strtoll instead of strtol (the result is a
// long long), the trailing-characters branch now actually returns 0
// instead of falling through and returning the partial value, and the
// signed value is printed with %lld rather than %llu.
long long val_l(const char* arg)
{
    char *endptr;
    long long x = strtoll(arg, &endptr, 10); /* parse long long from *arg* */
    if (endptr == arg) {
        printf("[-] ERROR: Invalid number: %s\n", arg);
        return 0;
    } else if (*endptr) {
        printf("[-] ERROR: Trailing characters after number: %s\n", arg);
        return 0;
    } else if (x <= 0) {
        printf("[-] ERROR: Number must be positive: %lld\n", x);
        return 0;
    }
    return x;
}
// Allocate the ncside x ncside simulation grid and the module-level
// snapshot copy (dummy), both zero-initialized. Exits on allocation
// failure (keeping the file's existing exit(0) convention).
cell_t** init_grid(const long ncside)
{
    cell_t** grid = (cell_t**) calloc(ncside, sizeof(cell_t*));
    dummy = (cell_t**) calloc(ncside, sizeof(cell_t*));
    // the original indexed these without checking the outer allocations
    if(grid == NULL || dummy == NULL)
        exit(0);
    for(long c=0; c<ncside; c++)
    {
        grid[c] = (cell_t*)calloc(ncside, sizeof(cell_t));
        dummy[c] = (cell_t*)calloc(ncside, sizeof(cell_t));
        if(grid[c] == NULL || dummy[c] == NULL)
            exit(0);
    }
    return grid;
}
// Release every row of both the grid and its snapshot copy (dummy),
// then the outer pointer arrays themselves.
void free_grid(cell_t** grid, long ncside)
{
    long row;
    for(row = 0; row < ncside; row++)
    {
        free(grid[row]);
        free(dummy[row]);
    }
    free(grid);
    free(dummy);
}
// Seed the RNG and give every particle a random position in [0,1)^2,
// a small random velocity, and a random mass scaled by the grid size.
void init_particles(long seed, long ncside, long long n_part, particle_t *par)
{
    srandom(seed);
    for(long long idx = 0; idx < n_part; idx++)
    {
        particle_t *p = &par[idx];
        // draw in the same order as before: x, y, vx, vy, m
        p->x = RND0_1;
        p->y = RND0_1;
        p->vx = RND0_1 / ncside / 10.0;
        p->vy = RND0_1 / ncside / 10.0;
        p->m = RND0_1 * ncside / (G * 1e6 * n_part);
    }
}
// Bin every particle into its grid cell and accumulate per-cell total
// mass and mass-weighted coordinates, in both grid and the snapshot
// copy (dummy).
// Fixes vs. the original: the pragma was "#pragma parallel for"
// (missing "omp") and therefore silently ignored; the cell index cast
// applied to the coordinate instead of the product, so (long)p[i].x was
// always 0 and every particle landed in cell (0,0); and the dummy
// updates raced because they had no atomics.
void init_env(cell_t** grid, long ncside, particle_t* p, long long n_par)
{
    #pragma omp parallel for
    for(long long i=0; i<n_par; i++)
    {
        // cast the PRODUCT, not the coordinate in [0,1)
        p[i].cx = (long)(p[i].x * ncside);
        p[i].cy = (long)(p[i].y * ncside);
        #pragma omp atomic
        grid[p[i].cx][p[i].cy].M += p[i].m;
        #pragma omp atomic
        dummy[p[i].cx][p[i].cy].M += p[i].m;
        #pragma omp atomic
        grid[p[i].cx][p[i].cy].x += p[i].m * p[i].x;
        #pragma omp atomic
        dummy[p[i].cx][p[i].cy].x += p[i].m * p[i].x;
        #pragma omp atomic
        grid[p[i].cx][p[i].cy].y += p[i].m * p[i].y;
        #pragma omp atomic
        dummy[p[i].cx][p[i].cy].y += p[i].m * p[i].y;
    }
}
// Add the gravitational acceleration that cell c exerts on a particle
// of mass m at (x, y) into (*ax, *ay). Cells with no mass, or whose
// centre of mass is closer than EPSLON, contribute nothing.
void accellerate_p(double* ax, double* ay, const cell_t* c, double m, double x, double y)
{
    // empty cell: no force
    if((c->M) == 0.0)
        return;
    // displacement from the particle to the cell's centre of mass
    double dx = (c->x) / (c->M) - x;
    double dy = (c->y) / (c->M) - y;
    double d_2 = dx*dx + dy*dy;
    // skip interactions below the softening distance
    if(sqrt(d_2) < EPSLON)
        return;
    // G*M/d^2, distributed over the displacement components
    double mag = ((c->M) * G) / d_2;
    *ax += dx * mag;
    *ay += dy * mag;
}
// Advance every particle one time step: accumulate gravity from the 3x3
// cell neighbourhood (read from the dummy snapshot taken at the start of
// the step), integrate velocity and position with toroidal wrap-around,
// and keep the live grid's per-cell mass/centre sums consistent. On the
// final step the totals are reduced into the globals t_mass/t_cx/t_cy.
// Fixes vs. the original: "(long) px * ncside" cast px (in [0,1)) to 0
// BEFORE multiplying, mapping every particle to cell (0,0); the same bug
// affected cy, nx and ny. The stray ", nx;" / ", ny;" comma expressions
// were removed.
void update_particles(cell_t** grid, long ncside, particle_t* par, long long n_par, long n_step, long step)
{
    double m, px, py, ax, ay;
    long cx, cy, nx, ny, ux, uy, lx, ly;
    // only spin up a team when there is enough total work
    #pragma omp parallel if(n_par*n_step > 1000000)
    {
        #pragma omp for private(m, px, py, ax, ay, cx, cy, nx, ny, ux, uy, lx, ly), reduction(+:t_mass, t_cx, t_cy), schedule(dynamic, 1000)
        for(long long i=0; i<n_par; i++)
        {
            m = par[i].m;
            px = par[i].x;
            py = par[i].y;
            // cast the product, not the coordinate
            cx = (long)(px * ncside);
            cy = (long)(py * ncside);
            // neighbour indices with periodic wrap-around
            ux = cx+1; uy = cy+1; lx = cx-1; ly = cy-1;
            if(ux >= ncside)
                ux = 0;
            else if(lx < 0)
                lx = ncside-1;
            if(uy >= ncside)
                uy = 0;
            else if(ly < 0)
                ly = ncside-1;
            ax = 0.0;
            ay = 0.0;
            accellerate_p(&ax, &ay, &(dummy[cx][cy]), m, px, py); // current cell
            accellerate_p(&ax, &ay, &(dummy[ux][cy]), m, px, py); // right cell
            accellerate_p(&ax, &ay, &(dummy[lx][cy]), m, px, py); // left cell
            //upper adjacents
            accellerate_p(&ax, &ay, &(dummy[cx][uy]), m, px, py); // upper cell
            accellerate_p(&ax, &ay, &(dummy[lx][uy]), m, px, py); // upper left cell
            accellerate_p(&ax, &ay, &(dummy[ux][uy]), m, px, py); // upper right cell
            //lower adjacents
            accellerate_p(&ax, &ay, &(dummy[cx][ly]), m, px, py); // lower cell
            accellerate_p(&ax, &ay, &(dummy[lx][ly]), m, px, py); // lower left cell
            accellerate_p(&ax, &ay, &(dummy[ux][ly]), m, px, py); // lower right cell
            //update velocity
            par[i].vx += ax;
            par[i].vy += ay;
            //update position, wrapping back into [0,1)
            par[i].x += par[i].vx + ax*0.5;
            while(par[i].x >= 1.0)
                par[i].x -= 1.0;
            while(par[i].x < 0.0)
                par[i].x += 1.0;
            par[i].y += par[i].vy + ay*0.5;
            while(par[i].y >= 1.0)
                par[i].y -= 1.0;
            while(par[i].y < 0.0)
                par[i].y += 1.0;
            // move the particle's contribution between cells on a crossing
            nx = (long)(par[i].x * ncside);
            ny = (long)(par[i].y * ncside);
            if(cx-nx || cy-ny)
            {
                if(cx-nx) par[i].cx = nx;
                if(cy-ny) par[i].cy = ny;
                #pragma omp atomic
                grid[cx][cy].M -= m;
                #pragma omp atomic
                grid[cx][cy].x -= m * px;
                #pragma omp atomic
                grid[cx][cy].y -= m * py;
                #pragma omp atomic
                grid[nx][ny].M += m;
                #pragma omp atomic
                grid[nx][ny].x += m * par[i].x;
                #pragma omp atomic
                grid[nx][ny].y += m * par[i].y;
            }
            // final step: accumulate total mass and centre of mass
            if(n_step-1-step == 0)
            {
                t_mass += par[i].m;
                t_cx += par[i].m * par[i].x;
                t_cy += par[i].m * par[i].y;
            }
        }
    }
    // refresh the read-only snapshot for the next step
    #pragma omp parallel for
    for(long c = 0; c<ncside; c++)
    {
        for(long l = 0; l<ncside; l++)
        {
            dummy[c][l] = grid[c][l];
        }
    }
}
// Entry point: parse <seed> <ncside> <n_par> <n_step>, build grid and
// particles, run the simulation, and print particle 0's final position
// followed by the overall centre of mass.
int main(int argc, const char * argv[])
{
    if(argc != 5)
    {
        printf("[-] ERROR: Invalid number of arguments... Expected 4 but got %d\n", argc-1);
        usg_err();
    }
    const long seed = (long) val_l(argv[1]);
    const long ncside = (long) val_l(argv[2]);
    const long long n_par = val_l(argv[3]);
    const long n_step = (long) val_l(argv[4]);
    // val_l returns 0 for any invalid argument, zeroing the product
    if(!(seed*ncside*n_par*n_step))
        usg_err();
    double start_t, end_t;
    double elapsed_t;
    start_t = omp_get_wtime();
    particle_t* par = (particle_t*) calloc(n_par, sizeof(particle_t));
    // Check the allocation BEFORE handing par to init_particles; the
    // original dereferenced it first and only checked afterwards.
    if(par == NULL) exit(0);
    init_particles(seed, ncside, n_par, par);
    cell_t** grid = init_grid(ncside);
    if(grid == NULL) exit(0);
    init_env(grid, ncside, par, n_par);
    for(long step = 0; step < n_step; step++)
    {
        update_particles(grid, ncside, par, n_par, n_step, step);
    }
    // normalize the accumulated centre of mass by the total mass
    t_cx /= t_mass;
    t_cy /= t_mass;
    printf("%.2f %.2f\n", par[0].x, par[0].y);
    printf("%.2f %.2f\n", t_cx, t_cy);
    end_t = omp_get_wtime();
    elapsed_t = ((double) (end_t - start_t));
    //printf("%f (s)\n", elapsed_t);
    free(par);
    free_grid(grid, ncside);
    return 0;
}
|
spherepix.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <complex.h>
#include <math.h>
#include "fastsphere.h"
#include "config.h"
#include "init.h"
#include "util.h"
#include "scatmat.h"
#include "farfield.h"
#include "spreflect.h"
#ifdef DOUBLEPREC
typedef double real;
typedef complex double cplx;
#else
typedef float real;
typedef complex float cplx;
#endif
/* Print the usage summary to stderr and exit with failure status. */
void usage (char *name) {
	static const char *help[] = {
		"\t-h: Print this message and exit\n",
		"\t-e: Specify the existence of an enclosing sphere in the input file\n",
		"\t-m: Specify the lower and upper box corner in wavelengths\n\t\t(default: tight)\n",
		"\t-n: Specify the number of pixels in each dimension (default: 100)\n",
		"\tInput file name may be '-' or omitted for stdin\n",
		"\tOutput file name may be '-' or omitted for stdout\n",
	};
	fprintf (stderr, "USAGE: %s [-h] [-e] [-m mx [my mz [Mx My Mz]]] [-n Nx [Ny Nz]] [input [output]]\n", name);
	for (size_t i = 0; i < sizeof help / sizeof help[0]; ++i)
		fputs (help[i], stderr);
	exit (EXIT_FAILURE);
}
/* Determine if a point is inside a sphere. */
int insphere (double *pt, double *cen, double r) {
double dist, dx[3];
dx[0] = pt[0] - cen[0];
dx[1] = pt[1] - cen[1];
dx[2] = pt[2] - cen[2];
/* Find the distance between the point and the center. */
dist = sqrt (dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]);
/* Return whether or not the point is in the sphere. */
return (dist <= r);
}
/* Compute the Laplacian of the inverse of the square root of density. The
* density is already rooted. Watch for edges of the domain. */
/* Compute the Laplacian of the inverse of the square root of density at
 * linear pixel index pos of an nelt[0] x nelt[1] slab. The density is
 * already rooted, i.e. r holds sqrt(rho), so each sample contributes
 * 1.0 / r[...]. lr and nr are the previous and next z-slabs; either may
 * be NULL at the domain's z edges. c holds the cell size per dimension.
 * At any edge the out-of-range neighbour is replaced by the background
 * value 1.0 (unit rooted density). Standard 7-point finite-difference
 * stencil: (prev + next - 2*centre) / h^2 summed over the three axes. */
real lapden (real *r, real *lr, real *nr, double *c, int pos, int *nelt) {
real dlap, nval, pval;
int x, y;
/* Recover the 2-D pixel coordinates from the linear index. */
x = pos % nelt[0];
y = pos / nelt[0];
/* Contribution of the x offsets with bounds checking. */
if (x >= nelt[0] - 1) nval = 1.0;
else nval = 1.0 / r[pos + 1];
if (x <= 0) pval = 1.0;
else pval = 1.0 / r[pos - 1];
dlap = (pval + nval - 2.0 / r[pos]) / (c[0] * c[0]);
/* Contribution of the y offsets with bounds checking. */
if (y >= nelt[1] - 1) nval = 1.0;
else nval = 1.0 / r[pos + nelt[0]];
if (y <= 0) pval = 1.0;
else pval = 1.0 / r[pos - nelt[0]];
dlap += (pval + nval - 2.0 / r[pos]) / (c[1] * c[1]);
/* Contribution of the z offsets with bounds checking: a NULL slab
 * pointer marks the first/last slab of the volume. */
if (!nr) nval = 1.0;
else nval = 1.0 / nr[pos];
if (!lr) pval = 1.0;
else pval = 1.0 / lr[pos];
dlap += (pval + nval - 2.0 / r[pos]) / (c[2] * c[2]);
return dlap;
}
/* Augment the contrast slab k with the density correction term derived
 * from the rooted-density slabs (r: current, lr: previous, nr: next;
 * the outer slabs may be NULL). Returns the number of pixels updated. */
int augct (cplx *k, real *r, real *lr, real *nr, int *nelt, double *cell) {
	int idx;
	const int npx = nelt[0] * nelt[1];
	real corr;
	#pragma omp parallel for default(shared) private(idx,corr)
	for (idx = 0; idx < npx; ++idx) {
		/* Laplacian of the inverse square root of the density. */
		corr = lapden (r, lr, nr, cell, idx, nelt);
		/* Scale by the density root, normalize by the wave number. */
		corr *= r[idx] / (4.0 * M_PI * M_PI);
		/* Subtract the density term from the contrast. */
		k[idx] -= corr;
	}
	return npx;
}
/* Build the contrast and density maps for a slab. */
/* Build the contrast and density maps for a slab.
 * Fills one z-slab (index zidx) of the contrast map ct and the rooted
 * density map density over the nelt[0] x nelt[1] pixel grid. blim is
 * the lower box corner and cell the per-axis cell size. bgs, when
 * non-NULL, is an enclosing background sphere centred at the origin;
 * slist holds the nsphere scatterers. Returns the slab's pixel count.
 * NOTE(review): when several spheres of slist overlap a pixel the LAST
 * matching entry wins -- the j loop has no break, so later spheres
 * deliberately override earlier ones. */
int bldct (cplx *ct, real *density, int *nelt, double *blim,
double *cell, sptype *bgs, spscat *slist, int nsphere, int zidx) {
double zero[3] = {0, 0, 0};
int ntot = nelt[0] * nelt[1];
#pragma omp parallel default(shared)
{
double cen[3];
int i, j, idx[2];
cplx ctval;
real dval;
/* z coordinate of every cell centre in this slab */
cen[2] = blim[2] + ((double)zidx + 0.5) * cell[2];
/* Build the density-free contrast and the density map. */
#pragma omp for
for (i = 0; i < ntot; ++i) {
/* Find the cell index. */
idx[0] = i % nelt[0];
idx[1] = i / nelt[0];
/* Find the cell center. */
cen[0] = blim[0] + ((double)idx[0] + 0.5) * cell[0];
cen[1] = blim[1] + ((double)idx[1] + 0.5) * cell[1];
/* Set the background contrast. */
ctval = 2 * M_PI;
dval = 1.0;
/* Check if the point is in an enclosing sphere, if it exists. */
if (bgs && insphere (cen, zero, bgs->r)) {
ctval = (cplx)(bgs->k);
dval = (real)(bgs->rho);
}
/* If the point is in an inner sphere, set the wave number. */
for (j = 0; j < nsphere; ++j)
if (insphere (cen, slist[j].cen, slist[j].spdesc->r)) {
ctval = (cplx)(slist[j].spdesc->k);
dval = (real)(slist[j].spdesc->rho);
}
/* Convert the wave number to the contrast. */
ctval /= (2 * M_PI);
ctval = ctval * ctval - 1;
/* Set the contrast value and density in the grid; density is
 * stored rooted (sqrt), as lapden expects. */
ct[i] = ctval;
density[i] = sqrt(dval);
}
}
return ntot;
}
/* Entry point: read a sphere configuration, rasterize the contrast and
 * rooted-density maps slab by slab, augment each slab with the density
 * correction, and stream the result to the output file.
 * Fixes vs. the original: with -e and automatic box limits the UPPER
 * box corner was set to -bgspt.r (an inverted, nonpositive-size box);
 * getopt's return was stored in a char, so the != -1 test could never
 * terminate on unsigned-char targets; the slab allocations were
 * unchecked; the unused local n was removed. */
int main (int argc, char **argv) {
	int nspheres, nsptype, i, npx, ndig;
	int autobox = 1, nelt[3] = {100, 100, 100};
	double boxlim[6], cell[3];
	cplx *k, *nk, *kslab;
	real *density, *lr, *r, *nr;
	FILE *fptr = NULL;
	int ch;			/* getopt returns int, not char */
	char *progname;
	sptype *sparms, *bgptr = NULL, bgspt;
	spscat *slist;
	bgtype bg;
	exctparm exct;
	itconf itc;
	/* Store the name used to invoke the program. */
	progname = argv[0];
	while ((ch = getopt (argc, argv, "hem:n:")) != -1) {
		switch (ch) {
		case 'e':
			bgptr = &bgspt;
			break;
		case 'm':
			/* Specify the box limits. */
			autobox = sscanf (optarg, "%lf %lf %lf %lf %lf %lf",
					boxlim, boxlim + 1, boxlim + 2,
					boxlim + 3, boxlim + 4, boxlim + 5);
			switch (autobox) {
			case 1:
				/* Set symmetric bounds from one dimension. */
				boxlim[0] = boxlim[1] = boxlim[2] = -ABS(boxlim[0]);
				boxlim[3] = boxlim[4] = boxlim[5] = ABS(boxlim[0]);
				break;
			case 3:
				/* Set symmetric bounds from one corner. */
				boxlim[0] = -(boxlim[3] = ABS(boxlim[0]));
				boxlim[1] = -(boxlim[4] = ABS(boxlim[1]));
				boxlim[2] = -(boxlim[5] = ABS(boxlim[2]));
				break;
			case 6:
				/* Nothing to be done for a fully specified box. */
				break;
			default:
				usage (progname);
			}
			/* Don't automatically specify limits. */
			autobox = 0;
			break;
		case 'n':
			i = sscanf (optarg, "%d %d %d", nelt, nelt + 1, nelt + 2);
			if (i == 1) nelt[1] = nelt[2] = nelt[0];
			else if (i != 3) usage (progname);
			break;
		case 'h': default:
			usage (progname);
		}
	}
	/* Point argv to the input and output specifications. */
	argc -= optind;
	argv += optind;
	if (argc < 1 || !strcmp("-", argv[0])) fptr = stdin;
	else fptr = critopen (argv[0], "r");
	readcfg (fptr, &nspheres, &nsptype, &sparms, bgptr, &slist, &bg, &exct, &itc, &ndig);
	fprintf (stderr, "Parsed configuration for %d spheres at %g MHz\n", nspheres, exct.f / 1e6);
	fclose (fptr);
	/* Automatically set box dimensions if necessary. */
	if (autobox && bgptr) {
		/* Bound the box by the enclosing sphere: lower corner -r,
		 * upper corner +r. (The original set the upper corner to -r
		 * as well, producing an inverted box.) */
		boxlim[0] = boxlim[1] = boxlim[2] = -bgspt.r;
		boxlim[3] = boxlim[4] = boxlim[5] = bgspt.r;
	} else if (autobox) {
		/* Set the initial bounds to enclose the first sphere. */
		boxlim[0] = slist->cen[0] - slist->spdesc->r;
		boxlim[1] = slist->cen[1] - slist->spdesc->r;
		boxlim[2] = slist->cen[2] - slist->spdesc->r;
		boxlim[3] = slist->cen[0] + slist->spdesc->r;
		boxlim[4] = slist->cen[1] + slist->spdesc->r;
		boxlim[5] = slist->cen[2] + slist->spdesc->r;
		/* Expand them to cover every remaining sphere. */
		for (i = 1; i < nspheres; ++i) {
			boxlim[0] = MIN(boxlim[0], slist[i].cen[0] - slist[i].spdesc->r);
			boxlim[1] = MIN(boxlim[1], slist[i].cen[1] - slist[i].spdesc->r);
			boxlim[2] = MIN(boxlim[2], slist[i].cen[2] - slist[i].spdesc->r);
			boxlim[3] = MAX(boxlim[3], slist[i].cen[0] + slist[i].spdesc->r);
			boxlim[4] = MAX(boxlim[4], slist[i].cen[1] + slist[i].spdesc->r);
			boxlim[5] = MAX(boxlim[5], slist[i].cen[2] + slist[i].spdesc->r);
		}
	}
	/* Compute the cell dimensions. */
	cell[0] = (boxlim[3] - boxlim[0]) / nelt[0];
	cell[1] = (boxlim[4] - boxlim[1]) / nelt[1];
	cell[2] = (boxlim[5] - boxlim[2]) / nelt[2];
	npx = nelt[0] * nelt[1];
	/* Allocate two contrast slabs and three density slabs; the extra
	 * slabs let each z step see its previous and next neighbours. */
	kslab = malloc (2 * npx * sizeof(cplx));
	density = malloc (3 * npx * sizeof(real));
	if (!kslab || !density) {
		fprintf (stderr, "ERROR: unable to allocate slab buffers.\n");
		return EXIT_FAILURE;
	}
	/* Point to the slab data stores. */
	k = kslab;
	nk = k + npx;
	lr = NULL;
	r = density;
	nr = r + npx;
	if (argc < 2 || !strcmp("-", argv[1])) fptr = stdout;
	else fptr = critopen (argv[1], "w");
	fprintf (stderr, "Writing contrast file.\n");
	/* Write the header. */
	fwrite (nelt, sizeof(int), 3, fptr);
	/* Construct the first slab of data. */
	bldct (k, r, nelt, boxlim, cell, bgptr, slist, nspheres, 0);
	for (i = 1; i < nelt[2]; ++i) {
		/* Construct the next slab of data. */
		bldct (nk, nr, nelt, boxlim, cell, bgptr, slist, nspheres, i);
		/* Build and write the previous slab. */
		augct (k, r, lr, nr, nelt, cell);
		fwrite (k, sizeof(cplx), npx, fptr);
		/* Rotate the slab pointers (contrast alternates between two
		 * buffers, density cycles through three). */
		k = kslab + (i % 2) * npx;
		nk = kslab + ((i + 1) % 2) * npx;
		lr = density + ((i - 1) % 3) * npx;
		r = density + (i % 3) * npx;
		nr = density + ((i + 1) % 3) * npx;
	}
	/* Build and write the last slab (no next density slab). */
	augct (k, r, lr, NULL, nelt, cell);
	fwrite (k, sizeof(cplx), npx, fptr);
	fclose (fptr);
	clrspheres (sparms, nsptype);
	if (exct.pwmag) free (exct.pwmag);
	if (exct.theta) free (exct.theta);
	if (exct.psmag) free (exct.psmag);
	if (exct.psloc) free (exct.psloc);
	free (kslab);
	free (density);
	return EXIT_SUCCESS;
}
|
persistent.c | #include "persistent.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
int compute_commit_dsv(Box* box_arr) {
int count = 1;
int thread_count;
//omp_set_dynamic(0); // Explicitly disable dynamic teams
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel shared(box_arr)
{
#pragma omp master
thread_count=omp_get_num_threads();
do {
#pragma omp for
for (int i = 0; i < NUM_BOXES; i++) {
box_arr[i].waat = 0;
// Get weighted average of top neighbours
if (box_arr[i].num_top != 0) {
int j;
for (j = 0; j < box_arr[i].num_top; j++) {
int cur_topid = box_arr[i].top_ids[j];
int overlap = box_arr[i].top_ov[j];
box_arr[i].waat = box_arr[i].waat + box_arr[cur_topid].dsv * overlap;
}
}
else {
box_arr[i].waat = box_arr[i].waat + box_arr[i].width * box_arr[i].dsv;
}
// Get weighted average of bottom neighbours
if (box_arr[i].num_bottom != 0) {
int j;
for (j = 0; j < box_arr[i].num_bottom; j++) {
int cur_bottomid = box_arr[i].bottom_ids[j];
int overlap = box_arr[i].bottom_ov[j];
box_arr[i].waat = box_arr[i].waat + box_arr[cur_bottomid].dsv * overlap;
}
}
else {
box_arr[i].waat = box_arr[i].waat + box_arr[i].width * box_arr[i].dsv;
}
// Get weighted average of left neighbours
if (box_arr[i].num_left != 0) {
int j;
for (j = 0; j < box_arr[i].num_left; j++) {
int cur_leftid = box_arr[i].left_ids[j];
int overlap = box_arr[i].left_ov[j];
box_arr[i].waat = box_arr[i].waat + box_arr[cur_leftid].dsv * overlap;
}
}
else {
box_arr[i].waat = box_arr[i].waat + box_arr[i].height * box_arr[i].dsv;
}
// Get weighted average of right neighbours
if (box_arr[i].num_right != 0) {
int j;
for (j = 0; j < box_arr[i].num_right; j++) {
int cur_rightid = box_arr[i].right_ids[j];
int overlap = box_arr[i].right_ov[j];
box_arr[i].waat = box_arr[i].waat + box_arr[cur_rightid].dsv * overlap;
}
}
else {
box_arr[i].waat = box_arr[i].waat + box_arr[i].height * box_arr[i].dsv;
}
// Find the weighted average by dividing with the perimeter
box_arr[i].waat = box_arr[i].waat / box_arr[i].perimeter;
}
#pragma omp master
{
MIN_DSV = INT_MAX;
MAX_DSV = INT_MIN;
count++;
}
#pragma omp barrier
#pragma omp for reduction(max:MAX_DSV) reduction(min:MIN_DSV)
for (int i = 0; i < NUM_BOXES; i++) {
if (box_arr[i].waat > box_arr[i].dsv) {
box_arr[i].dsv = box_arr[i].dsv + AFFECT_RATE * (box_arr[i].waat - box_arr[i].dsv);
}
else {
box_arr[i].dsv = box_arr[i].dsv - AFFECT_RATE * (box_arr[i].dsv - box_arr[i].waat);
}
if (box_arr[i].dsv < MIN_DSV) MIN_DSV = box_arr[i].dsv;
if (box_arr[i].dsv > MAX_DSV) MAX_DSV = box_arr[i].dsv;
}
//if (count == 2) break;
if (MAX_DSV == 0) break;
} while (((MAX_DSV - MIN_DSV) / MAX_DSV) > EPSILON);
count--;
}
printf("A total of %d threads were created.\n", thread_count);
return count;
}
/* Read the first line of the grid description from stdin and store the
 * global grid dimensions NUM_BOXES, NUM_ROWS and NUM_COLS.
 * Exits with failure when stdin is empty or the file starts with the
 * "-1" terminator. Fix vs. the original: the fflush(stdin) call was
 * removed -- fflush on an input stream is undefined behavior. */
void readgridparam() {
    // Assuming each line in the datafile is Max 500 characters
    char line[MAXLEN] = "";
    if (fgets(line, sizeof(line), stdin)) {
        // If the first line of the file contains -1, exit
        if (line[0] == '-') {
            fprintf(stderr, "First line of the file contains -1. Exiting....");
            exit(EXIT_FAILURE);
        }
        else {
            // We only expect 3 numbers in the first line:
            // <number of grid boxes> <num_grid_rows> <num_grid_cols>
            int arr[3];
            parseline(arr, line, 0);
            NUM_BOXES = arr[0];
            NUM_ROWS = arr[1];
            NUM_COLS = arr[2];
        }
    }
    else {
        fprintf(stderr, "File may not exist or is empty. Exiting....");
        exit(EXIT_FAILURE);
    }
}
void populate(Box* box_arr) {
char line1[MAXLEN] = "";
int box_count = 0;
// Read rest of file and populate the data structure
fflush(stdin);
while (fgets(line1, sizeof(line1), stdin)) {
if (line1[0] == '-') {
break;
}
else if (!strcmp(line1, "")) continue;
else if (!(line1[0] >= '0' && line1[0] <= '9')) continue;
else {
// Create new Box element
// Get Box id;
int id[1];
parseline(id, line1, 0);
box_arr[box_count].id = id[0];
// Get location, height and width
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int box_loc[4];
parseline(box_loc, line1, 0);
box_arr[box_count].up_left_y = box_loc[0];
box_arr[box_count].up_left_x = box_loc[1];
box_arr[box_count].height = box_loc[2];
box_arr[box_count].width = box_loc[3];
box_arr[box_count].perimeter = 2 * (box_arr[box_count].height + box_arr[box_count].width);
// Get top neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int top_num;
top_num = parsefirst(line1);
box_arr[box_count].num_top = top_num;
int* toparr = (int*)malloc(top_num * sizeof(int));
int* toparrov = (int*)malloc(top_num * sizeof(int));
parseline(toparr, line1, 1);
box_arr[box_count].top_ids = toparr;
box_arr[box_count].top_ov = toparrov;
if (top_num == 0) {
box_arr[box_count].top_ids = NULL;
}
// Get bottom neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int bottom_num;
bottom_num = parsefirst(line1);
box_arr[box_count].num_bottom = bottom_num;
int* bottomarr = (int*)malloc(bottom_num * sizeof(int));
int* bottomarrov = (int*)malloc(bottom_num * sizeof(int));
parseline(bottomarr, line1, 1);
box_arr[box_count].bottom_ids = bottomarr;
box_arr[box_count].bottom_ov = bottomarrov;
if (bottom_num == 0) {
box_arr[box_count].bottom_ids = NULL;
}
// Get left neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int left_num;
left_num = parsefirst(line1);
box_arr[box_count].num_left = left_num;
int* leftarr = (int*)malloc(left_num * sizeof(int));
int* leftarrov = (int*)malloc(left_num * sizeof(int));
parseline(leftarr, line1, 1);
box_arr[box_count].left_ids = leftarr;
box_arr[box_count].left_ov = leftarrov;
if (left_num == 0) {
box_arr[box_count].left_ids = NULL;
}
// Get right neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int right_num;
right_num = parsefirst(line1);
box_arr[box_count].num_right = right_num;
int* rightarr = (int*)malloc(right_num * sizeof(int));
int* rightarrov = (int*)malloc(right_num * sizeof(int));
parseline(rightarr, line1, 1);
box_arr[box_count].right_ids = rightarr;
box_arr[box_count].right_ov = rightarrov;
if (right_num == 0) {
box_arr[box_count].right_ids = NULL;
}
// Get dsv value
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
double dsv_val;
dsv_val = parsedsv(line1);
box_arr[box_count].dsv = dsv_val;
// Move to next box
box_count++;
fflush(stdin);
}
}
}
/* Extract every run of decimal digits from path as an integer and store
 * them consecutively in num. When func is 1 the leading number (the
 * count prefix of a neighbour line) is skipped. Non-digit characters
 * act as separators; signs are not handled, matching the input format.
 * Fixes vs. the original: the unused variable c was removed and
 * strlen(path) is computed once instead of on every loop test. */
void parseline(int* num, char* path, int func) {
    size_t len = strlen(path);
    size_t i = 0;
    int num_count = 0;
    if (func == 1) {
        /* Skip the leading count so only the payload numbers remain. */
        while (i < len && path[i] >= '0' && path[i] <= '9')
            i++;
    }
    for (; i < len; i++) {
        if (path[i] >= '0' && path[i] <= '9') {
            /* Accumulate one maximal digit run. */
            int number = 0;
            do {
                number = number * 10 + (path[i] - '0');
                i++;
            } while (i < len && path[i] >= '0' && path[i] <= '9');
            num[num_count] = number;
            num_count++;
        }
    }
}
/*
 * Parse the leading decimal integer at the start of `path`.
 * Returns 0 when the line does not start with a digit (the original
 * do-while unconditionally folded the first character into the result,
 * yielding garbage for non-numeric input).  No strlen() per iteration:
 * the digit test terminates at '\0' by itself.
 */
int parsefirst(char* path) {
    int i = 0, number = 0;
    while (path[i] >= '0' && path[i] <= '9') {
        number = number * 10 + (path[i] - '0');
        i++;
    }
    return number;
}
/*
 * Parse the leading floating-point value of a DSV input line using
 * strtod; returns 0.0 when no conversion is possible.  (The abandoned
 * hand-rolled digit parser that was left here commented out has been
 * removed.)
 */
double parsedsv(char* path) {
    return strtod(path, NULL);
}
/*
 * Length of the overlap of the 1-D intervals [a, a+alen] and [b, b+blen]:
 * distance between the right-most left edge and the left-most right edge.
 * abs() is kept to match the original inline computation exactly (for
 * disjoint neighbours it yields the gap size, as before).
 */
static int overlap1d(int a, int alen, int b, int blen) {
    int lo = (a >= b) ? a : b;                         /* right-most left edge */
    int hi = (a + alen <= b + blen) ? (a + alen)       /* left-most right edge */
                                    : (b + blen);
    return abs(hi - lo);
}

/*
 * Fill the top/bottom/left/right overlap arrays of every box from its
 * neighbour-id lists.  Vertical neighbours (top/bottom) overlap along x
 * (using width); horizontal neighbours (left/right) overlap along y
 * (using height).  The original repeated the same interval computation
 * four times inline; it is factored into overlap1d above.  A count of 0
 * simply runs the loop zero times, so the explicit `!= 0` guards were
 * dropped.
 */
void calcoverlap(struct Box* box_arr) {
    int i, j;
    for (i = 0; i < NUM_BOXES; i++) {
        for (j = 0; j < box_arr[i].num_top; j++) {
            int t = box_arr[i].top_ids[j];
            box_arr[i].top_ov[j] = overlap1d(box_arr[i].up_left_x, box_arr[i].width,
                                             box_arr[t].up_left_x, box_arr[t].width);
        }
        for (j = 0; j < box_arr[i].num_bottom; j++) {
            int b = box_arr[i].bottom_ids[j];
            box_arr[i].bottom_ov[j] = overlap1d(box_arr[i].up_left_x, box_arr[i].width,
                                                box_arr[b].up_left_x, box_arr[b].width);
        }
        for (j = 0; j < box_arr[i].num_left; j++) {
            int l = box_arr[i].left_ids[j];
            box_arr[i].left_ov[j] = overlap1d(box_arr[i].up_left_y, box_arr[i].height,
                                              box_arr[l].up_left_y, box_arr[l].height);
        }
        for (j = 0; j < box_arr[i].num_right; j++) {
            int r = box_arr[i].right_ids[j];
            box_arr[i].right_ov[j] = overlap1d(box_arr[i].up_left_y, box_arr[i].height,
                                               box_arr[r].up_left_y, box_arr[r].height);
        }
    }
}
/*
 * Diagnostic dump of every box: geometry, each neighbour list as
 * "id:overlap" pairs, and the current DSV value.
 * Fixes: "perimeter" was misspelled ("perimiter") in the output, and the
 * left/right labels omitted "and overlap" even though the overlap value
 * is printed for them exactly like for top/bottom.
 */
void printboxes(struct Box* box_arr) {
    int i;
    for (i = 0; i < NUM_BOXES; i++) {
        printf("================================");
        printf("\n\nBox id: %d\n", box_arr[i].id);
        printf("Box left_X, left_y, height, width, perimeter: %d, %d, %d, %d, %d\n", box_arr[i].up_left_x, box_arr[i].up_left_y, box_arr[i].height, box_arr[i].width, box_arr[i].perimeter);
        printf("Box top neighbours and overlap: ");
        int j;
        for (j = 0; j < box_arr[i].num_top; j++) {
            printf("%d:%d, ", box_arr[i].top_ids[j], box_arr[i].top_ov[j]);
        }
        printf("\n");
        printf("Box bottom neighbours and overlap: ");
        for (j = 0; j < box_arr[i].num_bottom; j++) {
            printf("%d:%d, ", box_arr[i].bottom_ids[j], box_arr[i].bottom_ov[j]);
        }
        printf("\n");
        printf("Box left neighbours and overlap: ");
        for (j = 0; j < box_arr[i].num_left; j++) {
            printf("%d:%d, ", box_arr[i].left_ids[j], box_arr[i].left_ov[j]);
        }
        printf("\n");
        printf("Box right neighbours and overlap: ");
        for (j = 0; j < box_arr[i].num_right; j++) {
            printf("%d:%d, ", box_arr[i].right_ids[j], box_arr[i].right_ov[j]);
        }
        printf("\n");
        printf("Box dsv value: %lf", box_arr[i].dsv);
        printf("\n");
    }
}
|
naugraph.c | /*****************************************************************************
* *
* Graph-specific auxiliary source file for version 2.2 of nauty. *
* *
* Copyright (1984-2002) Brendan McKay. All rights reserved. *
* Subject to waivers and disclaimers in nauty.h. *
* *
* CHANGE HISTORY *
* 16-Nov-00 : initial creation out of nautil.c *
* 22-Apr-01 : added aproto line for Magma *
* EXTDEFS is no longer required *
* removed dynamic allocation from refine1() *
* 21-Nov-01 : use NAUTYREQUIRED in naugraph_check() *
* *
*****************************************************************************/
#define ONE_WORD_SETS
#include "nauty.h"
/* macros for hash-codes: */
#define MASH(l,i) ((((l) ^ 065435) + (i)) & 077777)
/* : expression whose long value depends only on long l and int/long i.
Anything goes, preferably non-commutative. */
#define CLEANUP(l) ((int)((l) % 077777))
/* : expression whose value depends on long l and is less than 077777
when converted to int then short. Anything goes. */
#if MAXM==1
#define M 1
#else
#define M m
#endif
/* aproto: header new_nauty_protos.h */
dispatchvec dispatch_graph =
{isautom,testcanlab,updatecan,refine,refine1,cheapautom,bestcell,
naugraph_freedyn,naugraph_check,NULL,NULL};
#if !MAXN
DYNALLSTAT(set,workset,workset_sz);
DYNALLSTAT(permutation,workperm,workperm_sz);
DYNALLSTAT(int,bucket,bucket_sz);
#else
static set workset[MAXM]; /* used for scratch work */
static permutation workperm[MAXN];
static int bucket[MAXN+2];
#endif
/*****************************************************************************
* *
* isautom(g,perm,digraph,m,n) = TRUE iff perm is an automorphism of g *
* (i.e., g^perm = g). Symmetry is assumed unless digraph = TRUE. *
* *
*****************************************************************************/
/* TRUE iff perm is an automorphism of g, i.e. g^perm == g.  For an
 * undirected graph (digraph == FALSE) each row i is scanned from element
 * i onward; for a digraph every row is scanned in full (pos starts at -1).
 * When compiled with OpenMP the row loop is strided across threads.
 * NOTE(review): `autom` is read in every thread's loop condition and
 * written (to FALSE) by any thread that finds a mismatch, with no
 * synchronisation.  In practice this is a one-way early-exit flag, but it
 * is formally a data race under the C/OpenMP memory model -- confirm. */
boolean
isautom(graph *g, permutation *perm, boolean digraph, int m, int n)
{
boolean autom=TRUE;
#ifdef _OPENMP
#pragma omp parallel
#endif
{
int stride=1, offs=0;
register set *pg;
register int pos;
set *pgp;
int posp,i;
#ifdef _OPENMP
/* Thread t handles rows t, t+stride, t+2*stride, ... */
offs=omp_get_thread_num();
stride=omp_get_num_threads();
#endif
for (i = offs; autom && i < n; i+=stride)
{
/* pg = row i of g; pgp = row perm[i] of g. */
pg=g+M*i;
pgp = GRAPHROW(g,perm[i],M);
pos = (digraph ? -1 : i);
/* Every neighbour pos of i must map to a neighbour perm[pos] of perm[i]. */
while ((pos = nextelement(pg,M,pos)) >= 0)
{
posp = perm[pos];
if (!ISELEMENT(pgp,posp)) autom=FALSE;
}
}
}
return autom;
}
/*****************************************************************************
* *
* testcanlab(g,canong,lab,samerows,m,n) compares g^lab to canong, *
* using an ordering which is immaterial since it's only used here. The *
* value returned is -1,0,1 if g^lab <,=,> canong. *samerows is set to *
* the number of rows (0..n) of canong which are the same as those of g^lab. *
* *
* GLOBALS ACCESSED: workset<rw>,permset(),workperm<rw> *
* *
*****************************************************************************/
/* Compare g^lab against canong row by row under the natural word ordering.
 * Returns -1, 0 or 1 as g^lab <, ==, > canong, and sets *samerows to the
 * number of leading rows (0..n) on which the two graphs agree.
 * Uses the module scratch arrays workperm (the inverse of lab) and
 * workset (one permuted row at a time). */
int
testcanlab(graph *g, graph *canong, int *lab, int *samerows, int m, int n)
{
register int i,j;
register set *ph;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"testcanlab");
DYNALLOC1(set,workset,workset_sz,m,"testcanlab");
#endif
/* workperm = lab^{-1}, so permset relabels row entries into canonical order. */
for (i = 0; i < n; ++i) workperm[lab[i]] = i;
for (i = 0, ph = canong; i < n; ++i, ph += M)
{
/* workset = row lab[i] of g, relabelled by lab^{-1}; compare word-wise. */
permset(GRAPHROW(g,lab[i],M),workset,M,workperm);
for (j = 0; j < M; ++j)
if (workset[j] < ph[j])
{
*samerows = i;
return -1;
}
else if (workset[j] > ph[j])
{
*samerows = i;
return 1;
}
}
/* All n rows matched. */
*samerows = n;
return 0;
}
/*****************************************************************************
* *
* updatecan(g,canong,lab,samerows,m,n) sets canong = g^lab, assuming *
* the first samerows of canong are ok already. *
* *
* GLOBALS ACCESSED: permset(),workperm<rw> *
* *
*****************************************************************************/
/* Set canong = g^lab, assuming the first `samerows` rows of canong are
 * already correct (they are skipped).  workperm is used as lab^{-1} so
 * permset can relabel each remaining row in place. */
void
updatecan(graph *g, graph *canong, permutation *lab, int samerows, int m, int n)
{
register int i;
register set *ph;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"updatecan");
#endif
for (i = 0; i < n; ++i) workperm[lab[i]] = i;
for (i = samerows, ph = GRAPHROW(canong,samerows,M);
i < n; ++i, ph += M)
permset(GRAPHROW(g,lab[i],M),ph,M,workperm);
}
/*****************************************************************************
* *
* refine(g,lab,ptn,level,numcells,count,active,code,m,n) performs a *
* refinement operation on the partition at the specified level of the *
* partition nest (lab,ptn). *numcells is assumed to contain the number of *
* cells on input, and is updated. The initial set of active cells (alpha *
* in the paper) is specified in the set active. Precisely, x is in active *
* iff the cell starting at index x in lab is active. *
* The resulting partition is equitable if active is correct (see the paper *
* and the Guide). *
* *code is set to a value which depends on the fine detail of the *
* algorithm, but which is independent of the labelling of the graph. *
* count is used for work space. *
* *
* GLOBALS ACCESSED: workset<w>,bit<r>,nextelement(),bucket<w>,workperm<w> *
* *
*****************************************************************************/
/* Equitable-refinement procedure of nauty: refine the partition at the
 * given level of the nest (lab,ptn), starting from the active cells in
 * `active`, updating *numcells and producing a labelling-independent
 * hash in *code.  count is scratch.  When MAXM==1 the whole body is just
 * a tail call to the specialised refine1 (note the function's closing
 * brace lives inside the #if branch). */
void
refine(graph *g, int *lab, int *ptn, int level, int *numcells,
permutation *count, set *active, int *code, int m, int n)
{
#if MAXM==1
refine1(g,lab,ptn,level,numcells,count,active,code,m,n);
}
#else
register int i,c1,c2,labc1;
register setword x;
register set *set1,*set2;
int split1,split2,cell1,cell2;
int cnt,bmin,bmax;
long longcode;
set *gptr;
int maxcell,maxpos,hint;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"refine");
DYNALLOC1(set,workset,workset_sz,m,"refine");
DYNALLOC1(int,bucket,bucket_sz,n+2,"refine");
#endif
longcode = *numcells;
hint = 0;
/* Repeatedly take an active splitting cell [split1..split2]; `hint` is a
 * cheap guess that avoids rescanning the active set from the start. */
while (*numcells < n && ((split1 = hint, ISELEMENT(active,split1))
|| (split1 = nextelement(active,M,split1)) >= 0
|| (split1 = nextelement(active,M,-1)) >= 0))
{
DELELEMENT(active,split1);
for (split2 = split1; ptn[split2] > level; ++split2) {}
longcode = MASH(longcode,split1+split2);
if (split1 == split2) /* trivial splitting cell */
{
/* Split every cell into (adjacent, non-adjacent) w.r.t. one vertex. */
gptr = GRAPHROW(g,lab[split1],M);
for (cell1 = 0; cell1 < n; cell1 = cell2 + 1)
{
for (cell2 = cell1; ptn[cell2] > level; ++cell2) {}
if (cell1 == cell2) continue;
c1 = cell1;
c2 = cell2;
/* Two-pointer partition of lab[cell1..cell2] by adjacency. */
while (c1 <= c2)
{
labc1 = lab[c1];
if (ISELEMENT(gptr,labc1))
++c1;
else
{
lab[c1] = lab[c2];
lab[c2] = labc1;
--c2;
}
}
if (c2 >= cell1 && c1 <= cell2)
{
ptn[c2] = level;
longcode = MASH(longcode,c2);
++*numcells;
/* Keep the smaller fragment active (or both if cell1 already was). */
if (ISELEMENT(active,cell1) || c2-cell1 >= cell2-c1)
{
ADDELEMENT(active,c1);
if (c1 == cell2) hint = c1;
}
else
{
ADDELEMENT(active,cell1);
if (c2 == cell1) hint = cell1;
}
}
}
}
else /* nontrivial splitting cell */
{
/* workset = characteristic set of the splitting cell's vertices. */
EMPTYSET(workset,m);
for (i = split1; i <= split2; ++i)
ADDELEMENT(workset,lab[i]);
longcode = MASH(longcode,split2-split1+1);
for (cell1 = 0; cell1 < n; cell1 = cell2 + 1)
{
for (cell2 = cell1; ptn[cell2] > level; ++cell2) {}
if (cell1 == cell2) continue;
/* count[i] = number of neighbours of lab[i] inside the splitter. */
i = cell1;
set1 = workset;
set2 = GRAPHROW(g,lab[i],m);
cnt = 0;
for (c1 = m; --c1 >= 0;)
if ((x = (*set1++) & (*set2++)) != 0)
cnt += POPCOUNT(x);
count[i] = bmin = bmax = cnt;
bucket[cnt] = 1;
while (++i <= cell2)
{
set1 = workset;
set2 = GRAPHROW(g,lab[i],m);
cnt = 0;
for (c1 = m; --c1 >= 0;)
if ((x = (*set1++) & (*set2++)) != 0)
cnt += POPCOUNT(x);
while (bmin > cnt) bucket[--bmin] = 0;
while (bmax < cnt) bucket[++bmax] = 0;
++bucket[cnt];
count[i] = cnt;
}
if (bmin == bmax)
{
/* All counts equal: the cell does not split. */
longcode = MASH(longcode,bmin+cell1);
continue;
}
/* Counting sort of the cell by count[]; track the largest fragment. */
c1 = cell1;
maxcell = -1;
maxpos=0; // just to shut up gcc warning
for (i = bmin; i <= bmax; ++i)
if (bucket[i])
{
c2 = c1 + bucket[i];
bucket[i] = c1;
longcode = MASH(longcode,i+c1);
if (c2-c1 > maxcell)
{
maxcell = c2-c1;
maxpos = c1;
}
if (c1 != cell1)
{
ADDELEMENT(active,c1);
if (c2-c1 == 1) hint = c1;
++*numcells;
}
if (c2 <= cell2) ptn[c2-1] = level;
c1 = c2;
}
for (i = cell1; i <= cell2; ++i)
workperm[bucket[count[i]]++] = lab[i];
for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i];
/* All fragments except the largest stay active. */
if (!ISELEMENT(active,cell1))
{
ADDELEMENT(active,cell1);
DELELEMENT(active,maxpos);
}
}
}
}
longcode = MASH(longcode,*numcells);
*code = CLEANUP(longcode);
}
#endif /* else case of MAXM==1 */
/*****************************************************************************
* *
* refine1(g,lab,ptn,level,numcells,count,active,code,m,n) is the same as *
* refine(g,lab,ptn,level,numcells,count,active,code,m,n), except that *
* m==1 is assumed for greater efficiency. The results are identical in all *
* respects. See refine (above) for the specs. *
* *
*****************************************************************************/
/* Same specification as refine() but specialised to m == 1: every set is
 * a single setword, so set operations are plain word operations and the
 * splitter fits in the local `workset0`.  See refine() for the algorithm;
 * the results are identical in all respects. */
void
refine1(graph *g, int *lab, int *ptn, int level, int *numcells,
permutation *count, set *active, int *code, int m, int n)
{
register int i,c1,c2,labc1;
register setword x;
int split1,split2,cell1,cell2;
int cnt,bmin,bmax;
long longcode;
set *gptr,workset0;
int maxcell,maxpos,hint;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"refine1");
DYNALLOC1(int,bucket,bucket_sz,n+2,"refine1");
#endif
longcode = *numcells;
hint = 0;
/* Take active splitting cells until the partition is discrete or quiet. */
while (*numcells < n && ((split1 = hint, ISELEMENT1(active,split1))
|| (split1 = nextelement(active,1,split1)) >= 0
|| (split1 = nextelement(active,1,-1)) >= 0))
{
DELELEMENT1(active,split1);
for (split2 = split1; ptn[split2] > level; ++split2) {}
longcode = MASH(longcode,split1+split2);
if (split1 == split2) /* trivial splitting cell */
{
gptr = GRAPHROW(g,lab[split1],1);
for (cell1 = 0; cell1 < n; cell1 = cell2 + 1)
{
for (cell2 = cell1; ptn[cell2] > level; ++cell2) {}
if (cell1 == cell2) continue;
c1 = cell1;
c2 = cell2;
/* Two-pointer partition by adjacency to the splitting vertex. */
while (c1 <= c2)
{
labc1 = lab[c1];
if (ISELEMENT1(gptr,labc1))
++c1;
else
{
lab[c1] = lab[c2];
lab[c2] = labc1;
--c2;
}
}
if (c2 >= cell1 && c1 <= cell2)
{
ptn[c2] = level;
longcode = MASH(longcode,c2);
++*numcells;
/* Keep the smaller fragment active. */
if (ISELEMENT1(active,cell1) || c2-cell1 >= cell2-c1)
{
ADDELEMENT1(active,c1);
if (c1 == cell2) hint = c1;
}
else
{
ADDELEMENT1(active,cell1);
if (c2 == cell1) hint = cell1;
}
}
}
}
else /* nontrivial splitting cell */
{
/* One-word characteristic set of the splitter. */
workset0 = 0;
for (i = split1; i <= split2; ++i)
ADDELEMENT1(&workset0,lab[i]);
longcode = MASH(longcode,split2-split1+1);
for (cell1 = 0; cell1 < n; cell1 = cell2 + 1)
{
for (cell2 = cell1; ptn[cell2] > level; ++cell2) {}
if (cell1 == cell2) continue;
/* count[i] = popcount of (splitter AND row lab[i]). */
i = cell1;
if ((x = workset0 & g[lab[i]]) != 0)
cnt = POPCOUNT(x);
else
cnt = 0;
count[i] = bmin = bmax = cnt;
bucket[cnt] = 1;
while (++i <= cell2)
{
if ((x = workset0 & g[lab[i]]) != 0)
cnt = POPCOUNT(x);
else
cnt = 0;
while (bmin > cnt) bucket[--bmin] = 0;
while (bmax < cnt) bucket[++bmax] = 0;
++bucket[cnt];
count[i] = cnt;
}
if (bmin == bmax)
{
longcode = MASH(longcode,bmin+cell1);
continue;
}
/* Counting sort of the cell by count[]; track the largest fragment. */
c1 = cell1;
maxcell = -1;
maxpos=0; // only needed to silence gcc warning
for (i = bmin; i <= bmax; ++i)
if (bucket[i])
{
c2 = c1 + bucket[i];
bucket[i] = c1;
longcode = MASH(longcode,i+c1);
if (c2-c1 > maxcell)
{
maxcell = c2-c1;
maxpos = c1;
}
if (c1 != cell1)
{
ADDELEMENT1(active,c1);
if (c2-c1 == 1) hint = c1;
++*numcells;
}
if (c2 <= cell2) ptn[c2-1] = level;
c1 = c2;
}
for (i = cell1; i <= cell2; ++i)
workperm[bucket[count[i]]++] = lab[i];
for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i];
/* All fragments except the largest stay active. */
if (!ISELEMENT1(active,cell1))
{
ADDELEMENT1(active,cell1);
DELELEMENT1(active,maxpos);
}
}
}
}
longcode = MASH(longcode,*numcells);
*code = CLEANUP(longcode);
}
/*****************************************************************************
* *
* cheapautom(ptn,level,digraph,n) returns TRUE if the partition at the *
* specified level in the partition nest (lab,ptn) {lab is not needed here} *
* satisfies a simple sufficient condition for its cells to be the orbits of *
* some subgroup of the automorphism group. Otherwise it returns FALSE. *
* It always returns FALSE if digraph!=FALSE. *
* *
* nauty assumes that this function will always return TRUE for any *
* partition finer than one for which it returns TRUE. *
* *
*****************************************************************************/
/* Cheap sufficient test that the cells of the partition at this level are
 * the orbits of some subgroup of the automorphism group.  Always FALSE
 * for digraphs.  After the scan, `extra` equals n minus the number of
 * cells, and `nontrivial` counts cells with more than one vertex; the
 * condition accepts when almost every extra vertex sits in its own
 * non-trivial cell, or when there are at most four extra vertices. */
boolean
cheapautom(int *ptn, int level, boolean digraph, int n)
{
int v, extra, nontrivial;

if (digraph) return FALSE;

extra = n;        /* becomes n - (number of cells) */
nontrivial = 0;   /* cells containing more than one vertex */
v = 0;
while (v < n)
{
    --extra;
    if (ptn[v] > level)
    {
        ++nontrivial;
        /* Skip to the last vertex of this cell. */
        do { ++v; } while (ptn[v] > level);
    }
    ++v;
}
return (extra <= nontrivial + 1 || extra <= 4);
}
/*****************************************************************************
* *
* bestcell(g,lab,ptn,level,tc_level,m,n) returns the index in lab of the *
* start of the "best non-singleton cell" for fixing. If there is no *
* non-singleton cell it returns n. *
* This implementation finds the first cell which is non-trivially joined *
* to the greatest number of other cells. *
* *
* GLOBALS ACCESSED: bit<r>,workperm<rw>,workset<rw>,bucket<rw> *
* *
*****************************************************************************/
/* Return the index in lab of the start of the "best" non-singleton cell
 * for fixing, or n if every cell is a singleton.  "Best" here means the
 * first cell that is non-trivially joined (mixed adjacency) to the
 * greatest number of other non-singleton cells.
 * Fix: the DYNALLOC1 failure messages said "refine" (copy-paste from
 * refine()); they now correctly identify bestcell.
 * NOTE(review): tc_level is unused in this implementation -- confirm
 * against the dispatch interface before removing it. */
int
bestcell(graph *g, int *lab, int *ptn, int level, int tc_level, int m, int n)
{
register int i;
set *gp;
register setword setword1,setword2;
int v1,v2,nnt;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"bestcell");
DYNALLOC1(set,workset,workset_sz,m,"bestcell");
DYNALLOC1(int,bucket,bucket_sz,n+2,"bestcell");
#endif
/* find non-singleton cells: put starts in workperm[0..nnt-1] */
i = nnt = 0;
while (i < n)
{
if (ptn[i] > level)
{
workperm[nnt++] = i;
while (ptn[i] > level) ++i;
}
++i;
}
if (nnt == 0) return n;
/* set bucket[i] to # non-trivial neighbours of n.s. cell i */
for (i = nnt; --i >= 0;) bucket[i] = 0;
for (v2 = 1; v2 < nnt; ++v2)
{
/* workset = characteristic set of cell v2's vertices. */
EMPTYSET(workset,m);
i = workperm[v2] - 1;
do
{
++i;
ADDELEMENT(workset,lab[i]);
}
while (ptn[i] > level);
for (v1 = 0; v1 < v2; ++v1)
{
gp = GRAPHROW(g,lab[workperm[v1]],m);
#if MAXM==1
setword1 = *workset & *gp;
setword2 = *workset & ~*gp;
#else
setword1 = setword2 = 0;
for (i = m; --i >= 0;)
{
setword1 |= workset[i] & gp[i];
setword2 |= workset[i] & ~gp[i];
}
#endif
/* Non-trivially joined: v1's first vertex has both a neighbour and a
 * non-neighbour inside cell v2. */
if (setword1 != 0 && setword2 != 0)
{
++bucket[v1];
++bucket[v2];
}
}
}
/* find first greatest bucket value */
v1 = 0;
v2 = bucket[0];
for (i = 1; i < nnt; ++i)
if (bucket[i] > v2)
{
v1 = i;
v2 = bucket[i];
}
return (int)workperm[v1];
}
/*****************************************************************************
* *
* naugraph_check() checks that this file is compiled compatibly with the *
* given parameters. If not, call exit(1). *
* *
*****************************************************************************/
/* Print a fatal configuration-mismatch message on ERRFILE and abort. */
static void
naugraph_bail(const char *msg)
{
fprintf(ERRFILE,"%s",msg);
exit(1);
}

/* Verify that this module was compiled compatibly with the caller's
 * parameters (word size, MAXM/MAXN limits, BIGNAUTY flag, nauty version);
 * exits with status 1 on any mismatch.  Checks run in the same order as
 * before, so the first failing check determines the message printed. */
void
naugraph_check(int wordsize, int m, int n, int version)
{
if (wordsize != WORDSIZE)
    naugraph_bail("Error: WORDSIZE mismatch in naugraph.c\n");
#if MAXN
if (m > MAXM)
    naugraph_bail("Error: MAXM inadequate in naugraph.c\n");
if (n > MAXN)
    naugraph_bail("Error: MAXN inadequate in naugraph.c\n");
#endif
#ifdef BIGNAUTY
if ((version & 1) == 0)
    naugraph_bail("Error: BIGNAUTY mismatch in naugraph.c\n");
#else
if ((version & 1) == 1)
    naugraph_bail("Error: BIGNAUTY mismatch in naugraph.c\n");
#endif
if (version < NAUTYREQUIRED)
    naugraph_bail("Error: naugraph.c version mismatch\n");
}
/*****************************************************************************
* *
* naugraph_freedyn() - free the dynamic memory in this module *
* *
*****************************************************************************/
/* Free this module's dynamically allocated scratch arrays (workset,
 * workperm, bucket).  A no-op in static builds (MAXN != 0), where the
 * scratch space is fixed-size file-scope arrays. */
void
naugraph_freedyn(void)
{
#if !MAXN
DYNFREE(workset,workset_sz);
DYNFREE(workperm,workperm_sz);
DYNFREE(bucket,bucket_sz);
#endif
}
|
trust_worthiness.h | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "knn.h"
#include <algorithm>
#include <iostream>
#include <vector>
/**
 * Euclidean distance between two int vectors, iterating over the common
 * prefix (extra trailing elements of the longer vector are ignored, as
 * in the original).
 * Improvement: accumulate (a-b)^2 directly instead of the expanded
 * a^2 - 2ab + b^2 with pow() -- fewer operations and better numerical
 * behaviour (no cancellation between large squares).
 */
double euclidian_dist(const std::vector<int>& x, const std::vector<int>& y)
{
  double total = 0;
  auto i = x.begin();
  auto j = y.begin();
  for (; i != x.end() && j != y.end(); ++i, ++j) {
    const double d = static_cast<double>(*i) - static_cast<double>(*j);
    total += d * d;
  }
  return sqrt(total);
}
/**
 * Symmetric matrix of pairwise Euclidean distances between the rows of X.
 * Diagonal entries remain 0.
 * Fixes: the matrix must be n x n (samples x samples); the original sized
 * the columns by X[0].size() (the feature count), which is wrong whenever
 * features != samples, and also dereferenced X[0] on empty input.  The
 * distance is also kept in full double precision (the original truncated
 * each value through a float).
 */
std::vector<std::vector<double>> pairwise_distances(const std::vector<std::vector<int>>& X)
{
  const std::size_t n = X.size();
  std::vector<std::vector<double>> distance_matrix(n, std::vector<double>(n, 0.0));
#pragma omp parallel for
  for (size_t i = 0; i < n; ++i) {
    // Lower triangle only; each (i, j) pair is written by exactly one
    // iteration, so the parallel loop has no write conflicts.
    for (size_t j = 0; j < i; ++j) {
      const double val = euclidian_dist(X[i], X[j]);
      distance_matrix[i][j] = val;
      distance_matrix[j][i] = val;
    }
  }
  return distance_matrix;
}
/**
 * Indices that stably order the random-access range [begin, end)
 * according to comp (equal elements keep their relative order, exactly
 * as the original pair-list implementation did).
 * Improvement: sort an index vector directly instead of materialising a
 * vector of (index, iterator) pairs and copying it back out.
 */
template <typename Iter, typename Compare>
std::vector<int> argsort(Iter begin, Iter end, Compare comp)
{
  std::vector<int> idx(static_cast<std::size_t>(end - begin));
  for (std::size_t k = 0; k < idx.size(); ++k) idx[k] = static_cast<int>(k);
  std::stable_sort(idx.begin(), idx.end(), [&](int a, int b) {
    return comp(*(begin + a), *(begin + b));
  });
  return idx;
}
/**
 * Set the main diagonal of X to +infinity in a single pass (the original
 * scanned every element of every row just to find j == i).  Rows shorter
 * than their own index are left untouched, matching the original's
 * behaviour on ragged input.
 */
void fill_diag(std::vector<std::vector<double>>& X)
{
  for (std::size_t i = 0; i < X.size(); ++i) {
    if (i < X[i].size()) X[i][i] = INFINITY;
  }
}
/**
 * For every row of X, compute the list of its k-nearest-neighbour indices
 * by wrapping each row in a point object and delegating to knn_classify
 * over the whole collection.
 */
std::vector<std::vector<int>> get_knn_indices(const std::vector<std::vector<double>>& X,
                                              const int k)
{
  std::list<point> points;
  for (const auto& row : X) points.push_back(point(row));

  std::vector<std::vector<int>> neighbours;
  for (auto it = points.begin(); it != points.end(); ++it)
    neighbours.push_back(knn_classify(points, *it, k));
  return neighbours;
}
/**
 * Trustworthiness rank penalty: for each sample i and each of its k
 * neighbours in the embedded space, add how far beyond position k that
 * neighbour sits in the original-space ranking ind_X[i] (0 if it is not
 * found or already within the first k positions).
 * Improvements over the original: the penalty accumulates in a double
 * (it was an int silently returned as double), the ptrdiff_t position is
 * not narrowed into an int, and the two post-processing passes over a
 * temporary `ranks` vector are folded into the main loop (a not-found
 * neighbour contributed 0 - k < 0, i.e. nothing, exactly as here).
 */
double compute_rank(const std::vector<std::vector<int>>& ind_X,
                    std::vector<std::vector<int>>& ind_X_embedded,
                    const int k)
{
  const auto n = ind_X.size();
  double rank = 0.0;
  for (size_t i = 0; i < n; ++i) {
    for (int j = 0; j < k; ++j) {
      const auto it = std::find(ind_X[i].begin(), ind_X[i].end(), ind_X_embedded[i][j]);
      if (it == ind_X[i].end()) continue;  // absent neighbour contributes nothing
      const auto pos = std::distance(ind_X[i].begin(), it);
      if (pos > k) rank += static_cast<double>(pos - k);
    }
  }
  return rank;
}
/** Dump a 2-D matrix to stdout, one "[ e1 e2 ... ]" row per line. */
template <typename T>
void print_matrix(const std::vector<std::vector<T>>& matrix)
{
  for (const auto& row : matrix) {
    std::cout << "[ ";
    for (const auto& elem : row) std::cout << elem << ' ';
    std::cout << "]\n";
  }
}
/**
 * Trustworthiness of the embedding Y of the original data X:
 * 1 - penalty * 2 / (n*k*(2n - 3k - 1)), where the penalty sums, over
 * every sample, the original-space rank excess (beyond k) of each of its
 * k embedded-space neighbours.
 * NOTE(review): parameter d (the data dimensionality) is accepted but
 * never used here -- confirm against callers before removing it.
 */
double trustworthiness_score(const std::vector<std::vector<int>>& X,
                             const std::vector<std::vector<double>>& Y,
                             int n,
                             int d,
                             int k)
{
  auto dist_X = pairwise_distances(X);
  fill_diag(dist_X);

  // Original-space neighbour ranking: argsort each row by distance.
  std::vector<std::vector<int>> ind_X;
  for (auto& row : dist_X)
    ind_X.push_back(argsort(row.begin(), row.end(), std::less<double>()));

  auto ind_X_embedded = get_knn_indices(Y, k);
  const double penalty = compute_rank(ind_X, ind_X_embedded, k);
  return 1.0 - penalty * (2.0 / (n * k * (2.0 * n - 3.0 * k - 1.0)));
}
|
templatemath.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <math.h>
#include <cmath>
#include <dll.h>
#include <pointercast.h>
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifndef M_E
#define M_E 2.718281828459
#endif
#ifdef __CUDACC__
#include <types/float16.h>
#define math_def __host__ __device__
#ifdef CUDA_9
struct HALFS{
half H;
half L;
__host__ __device__
HALFS() {};
__host__ __device__
~HALFS() {};
};
union PAIR {
HALFS B;
int W;
__host__ __device__
PAIR() {};
__host__ __device__
~PAIR(){}
};
#else
typedef union {
struct {
half H;
half L;
} B;
int W;
} PAIR;
#endif // cuda_9
#else
#define math_def
#include <types/float16.h>
#endif
namespace nd4j {
#ifdef __CUDACC__
#endif
namespace math {
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template<typename T>
math_def inline T nd4j_re(T val1, T val2);
template<typename T>
math_def inline T nd4j_rint(T val1);
template<typename T>
math_def inline T nd4j_copysign(T val1, T val2);
//#ifndef __CUDACC__
template<typename T>
math_def inline T nd4j_dot(T *x, T *y, int length);
//#endif
template<typename T>
math_def inline T nd4j_ceil(T val1);
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T>
math_def inline T nd4j_cos(T val);
template<typename T>
math_def inline T nd4j_cosh(T val);
template<typename T>
math_def inline T nd4j_exp(T val);
template<typename T>
math_def inline T nd4j_floor(T val);
template<typename T>
math_def inline T nd4j_log(T val);
template<typename T>
math_def inline T nd4j_pow(T val, T val2);
template<typename T>
math_def inline T nd4j_round(T val);
template<typename T>
math_def inline T nd4j_remainder(T num, T denom);
template<typename T>
math_def inline T nd4j_fmod(T num, T denom);
template<typename T>
math_def inline T nd4j_erf(T num);
template<typename T>
math_def inline T nd4j_erfc(T num);
// Logistic sigmoid: 1 / (1 + e^-x).
template<typename T>
math_def inline T nd4j_sigmoid(T val) {
return (T) 1.0 / ((T) 1.0 + nd4j_exp<T>(-val));
}
// ELU activation: identity for x >= 0, e^x - 1 otherwise.
template<typename T>
math_def inline T nd4j_elu(T val) {
if (val >= (T) 0.0) return val;
else return nd4j_exp<T>(val) - (T) 1.0;
//return val >= 0.0 ? val : (nd4j_exp<T>(val) - 1.0);
}
// Leaky ReLU: alpha * x for negative x, identity otherwise.
template<typename T>
math_def inline T nd4j_leakyrelu(T val,T alpha) {
if (val < (T) 0.0f) return alpha * val;
else return val;
//return val < 0 ? alpha * val : val;
}
// Derivative of ELU: 1 for x >= 0, e^x otherwise.
template<typename T>
math_def inline T nd4j_eluderivative(T val) {
if (val >= (T) 0.0f) return (T) 1.0f;
else return nd4j_exp<T>(val);
//return val >= 0.0 ? 1.0 : nd4j_exp(val);
}
template<typename T>
math_def inline T nd4j_sin(T val);
template<typename T>
math_def inline T nd4j_sinh(T val);
// Softplus: ln(1 + e^x), a smooth approximation of ReLU.
template<typename T>
math_def inline T softplus(T val) {
return nd4j_log<T>((T) 1.0f + nd4j_exp<T>(val));
}
// Softsign: x / (1 + |x|), squashes into (-1, 1).
template<typename T>
math_def inline T nd4j_softsign(T val) {
return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}
template<typename T>
math_def inline T nd4j_sqrt(T val);
template<typename T>
math_def inline T nd4j_tanh(T val);
template<typename T>
math_def inline T nd4j_tan(T val);
template<typename T>
math_def inline T nd4j_atan2(T val1, T val2);
// atan2 specializations: float16 round-trips through float atan2f.
template<>
math_def inline float16 nd4j_atan2<float16>(float16 value1, float16 value2) {
return (float16) atan2f((float) value1, (float) value2);
}
template<>
math_def inline float nd4j_atan2<float>(float value1, float value2) {
return atan2f(value1, value2);
}
template<>
math_def inline double nd4j_atan2<double>(double value1, double value2) {
return atan2(value1, value2);
}
// Trigonometric tangent: tan(x) = sin(x) / cos(x).
// Fix: the previous body returned nd4j_log((val + 1 / (1 - val)) * 0.5),
// a garbled inverse-hyperbolic-style formula that has nothing to do with
// tan (and diverges at val == 1); it matched neither the function's name
// nor any standard identity.
template<typename T>
math_def inline T nd4j_tan(T val) {
return nd4j_sin<T>(val) / nd4j_cos<T>(val);
}
// Derivative of tanh: 1 - tanh(x)^2.
template<typename T>
math_def inline T nd4j_tanhderivative(T val) {
T tanh = nd4j_tanh(val);
return (T) 1.0f - tanh * tanh;
}
// Derivative of the logistic sigmoid: s(x) * (1 - s(x)).
template<typename T>
math_def inline T nd4j_sigmoidderivative(T val) {
T sigmoid = nd4j_sigmoid(val);
T out = sigmoid * ((T) 1.0f - sigmoid);
return out;
}
// Derivative of softsign: 1 / (1 + |x|)^2.
template<typename T>
math_def inline T nd4j_softsignderivative(T val) {
T y = (T) 1.0f + nd4j_abs(val);
return (T) 1.0f / (y * y);
}
// Signum: -1, 0 or +1 according to the sign of val.
template<typename T>
math_def inline T nd4j_sgn(T val) {
return val < (T) 0.0f ? (T) -1.0f : val > (T) 0.0f ? (T) 1.0f : (T) 0.0f;
}
// Alias of nd4j_sgn.
template<typename T>
math_def inline T nd4j_sign(T val) {
return nd4j_sgn<T>(val);
}
// Alias of nd4j_sgn.
template<typename T>
math_def inline T nd4j_signum(T val) {
return nd4j_sgn<T>(val);
}
//#ifndef __CUDACC__
// Dot product specialization for float16: plain scalar loop, since SIMD
// pragmas cannot be applied to the union-based float16 type.
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
float16 dot = (float16) 0.0f;
// TODO: since we can't use simd on unions, we might use something else here.
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
// Generic dot product of two length-`length` arrays, vectorised via the
// OpenMP simd reduction.
template<typename T>
math_def inline T nd4j_dot(T *x, T *y, int length) {
T dot = (T) 0.0f;
#pragma omp simd reduction(+:dot)
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
//#endif
template<typename T>
math_def inline T nd4j_acos(T val);
template<typename T>
math_def inline T nd4j_acosh(T val);
template<typename T>
math_def inline T nd4j_asin(T val);
template<typename T>
math_def inline T nd4j_asinh(T val);
// Inverse hyperbolic sine: asinh(x) = ln(sqrt(x^2 + 1) + x).
template<typename T>
math_def inline T nd4j_asinh(T val) {
//Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
return nd4j_log(nd4j_sqrt(nd4j_pow(val, (T) 2) + (T) 1) + val);
}
template<typename T>
math_def inline T nd4j_atan(T val);
template<typename T>
math_def inline T nd4j_atanh(T val);
// Absolute-value specializations.  float16 uses the hardware negate when
// native halfs are available, otherwise falls back through float fabsf.
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
if (value < (float16) 0.f) {
return float16(__hneg(value.data));
} else
return value;
#else
return (float16) fabsf((float) value);
#endif
}
template<>
math_def inline float nd4j_abs<float>(float value) {
return fabsf(value);
}
template<>
math_def inline double nd4j_abs<double>(double value) {
return fabs(value);
}
template<>
math_def inline int nd4j_abs<int>(int value) {
return abs(value);
}
template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
return llabs(value);
}
// Round-to-nearest-integer specializations; integral types are already
// integers and are returned unchanged.
template<>
math_def inline float16 nd4j_rint<float16>(float16 value) {
return (float16) rintf((float) value);
}
template<>
math_def inline float nd4j_rint<float>(float value) {
return rintf(value);
}
template<>
math_def inline double nd4j_rint<double>(double value) {
return rint(value);
}
template<>
math_def inline int nd4j_rint<int>(int value) {
return value;
}
template<>
math_def inline Nd4jLong nd4j_rint<Nd4jLong>(Nd4jLong value) {
return value;
}
// NaN-test specializations.  float/double use the self-inequality idiom;
// integral types can never be NaN.
// NOTE(review): the float16 test compares the raw bits against the single
// pattern 0x7fff, but IEEE half has many NaN encodings (any exponent of
// all ones with a nonzero mantissa) -- confirm this matches how this
// library's float16 produces NaNs.
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
return *(value.data.getXP()) == 0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<float>(float value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<double>(double value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
return false;
}
// A value is finite exactly when it is neither NaN nor infinite.
template<typename T>
math_def inline bool nd4j_isfin(T value) {
    return !(nd4j_isnan<T>(value) || nd4j_isinf<T>(value));
}
// ----- nd4j_copysign: magnitude of val1 combined with the sign of val2 -----
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
return (float16) copysignf((float) val1, (float) val2);
}
template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
return copysignf(val1, val2);
}
template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
return copysign(val1, val2);
}
// integer emulation via abs/negate; note there is no signed zero, and
// nd4j_abs<int>(INT_MIN) overflows (see the nd4j_abs note above)
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
if (val2 < 0) return -(nd4j_abs<int>(val1));
else return nd4j_abs<int>(val1);
}
template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1));
else return nd4j_abs<Nd4jLong>(val1);
}
// nd4j_max / nd4j_min specializations: return the larger (resp. smaller)
// of two scalars. On a tie the second operand is returned, and the
// comparison uses the element type's operator> / operator<, so NaN
// behavior matches the original ternary formulation exactly.
template<>
math_def inline float16 nd4j_max<float16>(float16 val1, float16 val2) {
    if (val1 > val2)
        return val1;
    return val2;
}
template<>
math_def inline float nd4j_max<float>(float val1, float val2) {
    if (val1 > val2)
        return val1;
    return val2;
}
template<>
math_def inline double nd4j_max<double>(double val1, double val2) {
    if (val1 > val2)
        return val1;
    return val2;
}
template<>
math_def inline int nd4j_max<int>(int val1, int val2) {
    if (val1 > val2)
        return val1;
    return val2;
}
template<>
math_def inline Nd4jLong nd4j_max<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
    if (val1 > val2)
        return val1;
    return val2;
}
template<>
math_def inline Nd4jLong nd4j_min<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
    if (val1 < val2)
        return val1;
    return val2;
}
template<>
math_def inline float16 nd4j_min<float16>(float16 val1, float16 val2) {
    if (val1 < val2)
        return val1;
    return val2;
}
template<>
math_def inline float nd4j_min<float>(float val1, float val2) {
    if (val1 < val2)
        return val1;
    return val2;
}
template<>
math_def inline double nd4j_min<double>(double val1, double val2) {
    if (val1 < val2)
        return val1;
    return val2;
}
template<>
math_def inline int nd4j_min<int>(int val1, int val2) {
    if (val1 < val2)
        return val1;
    return val2;
}
// ----- nd4j_ceil: round up to the nearest integer value -----
template<>
math_def inline float16 nd4j_ceil<float16>(float16 val) {
#ifdef NATIVE_HALFS
return hceil(val.data);
#else
// converts back to float16 through the implicit constructor
return ceilf((float) val);
#endif
}
template<>
math_def inline float nd4j_ceil<float>(float val1) {
return ceilf(val1);
}
template<>
math_def inline double nd4j_ceil<double>(double val) {
return ceil(val);
}
// an int is already integral; the float round-trip is a no-op for
// values representable exactly in float
template<>
math_def inline int nd4j_ceil<int>(int val) {
return ceil((float) val);
}
// ----- nd4j_cos -----
template<>
math_def inline float16 nd4j_cos<float16>(float16 val) {
#ifdef NATIVE_HALFS
return hcos(val.data);
#else
return cosf((float) val);
#endif
}
template<>
math_def inline float nd4j_cos<float>(float val) {
return cosf(val);
}
template<>
math_def inline double nd4j_cos<double>(double val) {
return cos(val);
}
// integer specialization: computed in float, truncated back to int
template<>
math_def inline int nd4j_cos<int>(int val) {
return cosf((float) val);
}
// ----- nd4j_cosh -----
template<>
math_def inline float16 nd4j_cosh<float16>(float16 val) {
return coshf((float) val);
}
template<>
math_def inline float nd4j_cosh<float>(float val) {
return coshf(val);
}
template<>
math_def inline double nd4j_cosh<double>(double val) {
return cosh(val);
}
template<>
math_def inline int nd4j_cosh<int>(int val) {
return coshf((float) val);
}
// ----- nd4j_exp -----
template<>
math_def inline float16 nd4j_exp<float16>(float16 val) {
#ifdef NATIVE_HALFS
return hexp(val.data);
#else
return (float16) expf((float) val);
#endif
}
template<>
math_def inline float nd4j_exp<float>(float val) {
return expf(val);
}
template<>
math_def inline double nd4j_exp<double>(double val) {
return exp(val);
}
template<>
math_def inline int nd4j_exp<int>(int val) {
return expf((float) val);
}
// ----- nd4j_floor: round down to the nearest integer value -----
template<>
math_def inline float16 nd4j_floor<float16>(float16 val) {
#ifdef NATIVE_HALFS
return hfloor(val.data);
#else
return (float16) floorf((float) val);
#endif
}
template<>
math_def inline float nd4j_floor<float>(float val) {
return floorf(val);
}
template<>
math_def inline double nd4j_floor<double>(double val) {
return floor(val);
}
template<>
math_def inline int nd4j_floor<int>(int val) {
return floorf((float) val);
}
// ----- nd4j_log: natural logarithm -----
template<>
math_def inline float16 nd4j_log<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hlog(val.data);
#else
    return (float16) logf((float) val);
#endif
}
template<>
math_def inline float nd4j_log<float>(float val) {
    return logf(val);
}
template<>
math_def inline double nd4j_log<double>(double val) {
    return log(val);
}
// fixed: the argument was cast to int (a no-op) before the implicit
// conversion to float; use an explicit float cast like the sibling
// integer specializations (exp, floor, cos, ...)
template<>
math_def inline int nd4j_log<int>(int val) {
    return logf((float) val);
}
// ----- nd4j_pow: val raised to the power val2 -----
template<>
math_def inline float16 nd4j_pow<float16>(float16 val, float16 val2) {
return (float16) powf((float) val, (float) val2);
}
template<>
math_def inline float nd4j_pow<float>(float val, float val2) {
return powf(val, val2);
}
template<>
math_def inline double nd4j_pow<double>(double val, double val2) {
return pow(val, val2);
}
// integer power computed in float then truncated; exact only while the
// result fits in float's 24-bit significand
template<>
math_def inline int nd4j_pow<int>(int val, int val2) {
return powf((float) val, (float) val2);
}
// Relative error between two scalars: |a - b| / (|a| + |b|),
// defined as 0 when both operands are exactly 0.
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
    const T zero = (T) 0.0f;
    if (val1 == zero && val2 == zero)
        return zero;
    T difference = nd4j_abs<T>(val1 - val2);
    T magnitude = nd4j_abs<T>(val1) + nd4j_abs<T>(val2);
    return difference / magnitude;
}
// ----- nd4j_round: round half away from zero -----
// (the double and int specializations follow further below, after erfc,
// preserving the original declaration order)
template<>
math_def inline float16 nd4j_round<float16>(float16 val) {
return (float16) roundf((float) val);
}
template<>
math_def inline float nd4j_round<float>(float val) {
return roundf(val);
}
// ----- nd4j_remainder: IEEE remainder (nearest-multiple convention) -----
template<>
math_def inline float nd4j_remainder<float>(float num, float denom) {
return remainderf(num, denom);
}
template<>
math_def inline double nd4j_remainder<double>(double num, double denom) {
return remainder(num, denom);
}
template<>
math_def inline float16 nd4j_remainder<float16>(float16 num, float16 denom) {
return (float16) remainderf((float) num, (float) denom);
}
// ----- nd4j_fmod: C fmod (result carries the sign of num) -----
template<>
math_def inline float nd4j_fmod<float>(float num, float denom) {
return fmodf(num, denom);
}
template<>
math_def inline double nd4j_fmod<double>(double num, double denom) {
return fmod(num, denom);
}
template<>
math_def inline float16 nd4j_fmod<float16>(float16 num, float16 denom) {
return (float16) fmodf((float) num, (float) denom);
}
// ----- nd4j_erf / nd4j_erfc: error function and its complement -----
template<>
math_def inline float nd4j_erf<float>(float num) {
return erff(num);
}
template<>
math_def inline double nd4j_erf<double>(double num) {
return erf(num);
}
template<>
math_def inline float16 nd4j_erf<float16>(float16 num) {
return (float16) erff((float) num);
}
template<>
math_def inline float nd4j_erfc<float>(float num) {
return erfcf(num);
}
template<>
math_def inline double nd4j_erfc<double>(double num) {
return erfc(num);
}
template<>
math_def inline float16 nd4j_erfc<float16>(float16 num) {
return (float16) erfcf((float) num);
}
// remaining nd4j_round specializations
template<>
math_def inline double nd4j_round<double>(double val) {
return round(val);
}
template<>
math_def inline int nd4j_round<int>(int val) {
return round((float) val);
}
// ----- nd4j_sin -----
template<>
math_def inline float16 nd4j_sin<float16>(float16 val) {
#ifdef NATIVE_HALFS
    return hsin(val.data);
#else
    return (float16) sinf((float) val);
#endif
}
template<>
math_def inline float nd4j_sin<float>(float val) {
    return sinf(val);
}
template<>
math_def inline double nd4j_sin<double>(double val) {
    return sin(val);
}
// fixed: use single-precision sinf() like every other integer
// specialization; the previous call to double-precision sin() paid for
// precision that the conversion back to int discards anyway
template<>
math_def inline int nd4j_sin<int>(int val) {
    return sinf((float) val);
}
// ----- nd4j_sinh: hyperbolic sine -----
// BUG FIX: the NATIVE_HALFS branch called hsin() -- the half-precision
// trigonometric *sine* intrinsic -- so native-half builds computed
// sin(x) instead of sinh(x). CUDA provides no hsinh intrinsic, so both
// build flavors now compute through single-precision sinhf() (the
// non-native path previously used double-precision sinh() needlessly).
template<>
math_def inline float16 nd4j_sinh<float16>(float16 val) {
    return (float16) sinhf((float) val);
}
template<>
math_def inline float nd4j_sinh<float>(float val) {
    return sinhf(val);
}
template<>
math_def inline double nd4j_sinh<double>(double val) {
    return sinh(val);
}
template<>
math_def inline int nd4j_sinh<int>(int val) {
    return sinhf((float) val);
}
// ----- nd4j_sqrt -----
template<>
math_def inline float16 nd4j_sqrt<float16>(float16 val) {
#ifdef NATIVE_HALFS
return hsqrt(val.data);
#else
return (float16) sqrtf((float) val);
#endif
}
template<>
math_def inline float nd4j_sqrt<float>(float val) {
return sqrtf(val);
}
template<>
math_def inline double nd4j_sqrt<double>(double val) {
return sqrt(val);
}
// truncates toward zero, e.g. nd4j_sqrt<int>(2) == 1
template<>
math_def inline int nd4j_sqrt<int>(int val) {
return sqrtf((float) val);
}
// ----- nd4j_tanh -----
template<>
math_def inline float16 nd4j_tanh<float16>(float16 val) {
return (float16) tanhf((float) val);
}
template<>
math_def inline float nd4j_tanh<float>(float val) {
return tanhf(val);
}
template<>
math_def inline double nd4j_tanh<double>(double val) {
return tanh(val);
}
template<>
math_def inline int nd4j_tanh<int>(int val) {
return tanhf((float) val);
}
// ----- nd4j_tan -----
template<>
math_def inline float16 nd4j_tan<float16>(float16 val) {
return (float16) tanf((float) val);
}
template<>
math_def inline float nd4j_tan<float>(float val) {
return tanf(val);
}
template<>
math_def inline double nd4j_tan<double>(double val) {
return tan(val);
}
template<>
math_def inline int nd4j_tan<int>(int val) {
return tanf((float) val);
}
// ----- inverse trigonometric / inverse hyperbolic specializations -----
// Uniform scheme throughout this section: float16 and int round-trip
// through the single-precision C library call; float and double call
// the library function of their own precision directly.
template<>
math_def inline float16 nd4j_acos<float16>(float16 val) {
return (float16) acosf((float) val);
}
template<>
math_def inline float nd4j_acos<float>(float val) {
return acosf(val);
}
template<>
math_def inline double nd4j_acos<double>(double val) {
return acos(val);
}
template<>
math_def inline int nd4j_acos<int>(int val) {
return acosf((float) val);
}
template<>
math_def inline float16 nd4j_acosh<float16>(float16 val) {
return (float16) acoshf((float) val);
}
template<>
math_def inline float nd4j_acosh<float>(float val) {
return acoshf(val);
}
template<>
math_def inline double nd4j_acosh<double>(double val) {
return acosh(val);
}
template<>
math_def inline int nd4j_acosh<int>(int val) {
return acoshf((float) val);
}
template<>
math_def inline float16 nd4j_asin<float16>(float16 val) {
return (float16) asinf((float) val);
}
template<>
math_def inline float nd4j_asin<float>(float val) {
return asinf(val);
}
template<>
math_def inline double nd4j_asin<double>(double val) {
return asin(val);
}
template<>
math_def inline int nd4j_asin<int>(int val) {
return asinf((float) val);
}
template<>
math_def inline float16 nd4j_atan<float16>(float16 val) {
return (float16) atanf((float)val);
}
template<>
math_def inline float nd4j_atan<float>(float val) {
return atanf(val);
}
template<>
math_def inline double nd4j_atan<double>(double val) {
return atan(val);
}
template<>
math_def inline int nd4j_atan<int>(int val) {
return atanf((float) val);
}
template<>
math_def inline float16 nd4j_atanh<float16>(float16 val) {
return (float16) atanhf((float)val);
}
template<>
math_def inline float nd4j_atanh<float>(float val) {
return atanhf(val);
}
template<>
math_def inline double nd4j_atanh<double>(double val) {
return atanh(val);
}
template<>
math_def inline int nd4j_atanh<int>(int val) {
return atanhf((float) val);
}
// Exchange the contents of two references (classic three-assignment swap).
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
    T tmp = val1;
    val1 = val2;
    val2 = tmp;
};
#ifdef __CUDACC__
// Device-side atomic arithmetic helpers. Operations without native
// hardware support are emulated with an atomicCAS loop: read the word,
// compute the candidate result, and retry until no other thread raced
// the update. Each function returns the previous value at *address.
namespace atomics {
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
// double-precision add emulated through a 64-bit CAS loop, reinterpreting
// the payload with __double_as_longlong / __longlong_as_double
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// half-precision add: atomicCAS operates on 32-bit words, so CAS the
// aligned int containing the half and splice the updated 16 bits into
// the correct half (high or low, depending on alignment) of that word
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
int* address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (addr - 2);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) + val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) + val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
// NOTE(review): stores (val - old), i.e. argument minus memory -- the
// reverse of CUDA atomicSub's (old - val). Confirm callers expect this
// orientation; the float specialization mirrors it.
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val -
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// multiplication is commutative, so operand order is immaterial here
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val *
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// NOTE(review): stores (val / old), not (old / val) -- same orientation
// question as nd4j_atomicSub above; confirm intended.
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val /
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// float add maps to the native hardware atomicAdd
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
return atomicAdd(address,val);
}
// Float atomic subtract emulated with a 32-bit CAS loop; returns the
// previous value.
// BUG FIX: `assumed` holds the raw *bit pattern* of the stored float and
// must be reinterpreted back with __int_as_float() before arithmetic.
// The old code applied __float_as_int(assumed), which first converted
// the integer bit pattern to a float numerically and then re-punned it,
// producing garbage results.
// NOTE(review): the operand order (val - old) mirrors the double
// specialization above -- confirm that orientation is intended.
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val - __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}
// Float atomic multiply emulated with a 32-bit CAS loop; returns the
// previous value. Multiplication is commutative so operand order is
// immaterial.
// BUG FIX: as in nd4j_atomicSub<float>, the stored bit pattern must be
// decoded with __int_as_float(); the old code used __float_as_int on the
// integer `assumed`, double-punning the value into garbage.
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val * __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}
// Float atomic divide emulated with a 32-bit CAS loop; returns the
// previous value.
// BUG FIXES: (1) the old code *multiplied* instead of dividing -- it was
// a copy of atomicMul; it now divides, matching nd4j_atomicDiv<double>'s
// (val / old) orientation. (2) the stored bit pattern is decoded with
// __int_as_float() instead of being double-punned via __float_as_int.
// NOTE(review): as with Sub/Div<double>, confirm (val / old) rather than
// (old / val) is the intended semantics.
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
    int* address_as_int = (int*) address;
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(val / __int_as_float(assumed)));
    } while (assumed != old);
    return __int_as_float(old);
}
}
#endif
}
}
#endif /* TEMPLATEMATH_H_ */ |
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
/*
Per-channel extent of a class: the histogram interval [left,right],
a running sum that Classify() later converts into the mean (center),
and the scan index used by DefineRegion().
*/
typedef struct _ExtentPacket
{
double
center;
ssize_t
index,
left,
right;
} ExtentPacket;
/*
One candidate class: its RGB extents, pixel count, id, and the link
to the next class in the singly linked list built by Classify().
*/
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
/*
Node of the interval tree built from zero crossings across scales
(tau) during the scale-space analysis.
*/
typedef struct _IntervalTree
{
double
tau;
ssize_t
left,
right;
double
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
/*
A histogram smoothed at scale tau together with the sign map of the
zero crossings of its second derivative (one entry per intensity).
*/
typedef struct _ZeroCrossing
{
double
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
/*
Blue/Green/Red: channel indices into the histogram/extrema arrays.
SafeMargin: slack (in 0..255 intensity units) added around class
extents when matching pixels. TreeLength: presumably the capacity of
the interval-tree node pool used later in this file -- confirm.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,
const double weighting_exponent,const MagickBooleanType verbose,
ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
double
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters: every (red,green,blue) triple of histogram intervals
produced by DefineRegion() becomes one candidate class.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) ResetMagickMemory(&red,0,sizeof(red));
(void) ResetMagickMemory(&green,0,sizeof(green));
(void) ResetMagickMemory(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster: a pixel is credited to the first
cluster whose RGB extents (widened by SafeMargin) contain it, and its
channel values are accumulated into the cluster centers.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
NOTE(review): count is reset here, so the threshold below compares
each cluster's pixel count against the number of clusters *kept so
far* times cluster_threshold/100 -- not against the total pixel
count accumulated above. Confirm this is the intended criterion.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster: assign its id and convert the accumulated
channel sums into mean center values.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
/*
NOTE(review): this exception path (like the earlier allocation
failures) returns without releasing the cluster list -- possible
memory leak; confirm against upstream.
*/
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations: squares[i] == i*i for i in
[-255,255]; the pointer is biased by 255 so negative channel
differences index directly.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse-grain classification: threshold each pixel against the
cluster extents; pixels not matched by any cluster fall through to
the fuzzy c-means membership computation below.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
register const PixelInfo
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,0,q);
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) cluster->id,q);
break;
}
}
if (cluster == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership: assign the pixel to the colormap
entry with the largest membership value 1/sum.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
/*
NOTE(review): status (progress-monitor cancellation and the SyncImage
result) is accumulated above but discarded here; the function always
reports success. Confirm whether returning status was intended.
*/
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings.

    Walk the scale-space fingerprint from the coarsest scale (highest tau,
    index number_crossings-1) toward finer scales.  For each crossing j at
    scale i, move it onto a crossing position of the next-coarser scale
    (i+1) -- either the same bin, or the nearest marked bin to the left or
    right -- provided an even number of i+1 crossings lie between it and the
    previous crossing of scale i.  This keeps fingerprints forming lines in
    scale-space rather than loops.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.  left/right are the nearest marked bins of scale i+1 on
        either side of j; center is j itself.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at this scale i).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
        correct stays -1 until a valid destination bin is found.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing value from bin j to the chosen bin; if no valid
        destination was found the crossing is dropped.
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
%    o extents:  This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Reset the extent to the full intensity range with no center; the scan
    resumes from extents->index, which persists across calls.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the next peak: a positive extrema entry marks the left edge.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Continue to the following valley: a negative entry ends the region.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i;

  /*
    Endpoints: one-sided, second-order (3-point) stencils, since there is no
    neighbor beyond the histogram boundary.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
  /*
    Interior bins: symmetric central differences, (f[i+1]-f[i-1])/2.
  */
  for (i=1; i < 255; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema workspace: a 256-bin histogram and a
    256-entry extrema map per color dimension.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      Fix: the extrema buffer was sized with sizeof(**histogram); use
      sizeof(**extrema) as SegmentImage() does.
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Release buffers already acquired before failing.
        */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and locate each channel's peaks/valleys via
    scale-space filtering.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) peak-region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Fix: the original leaked the partially built cluster list (it
              is always NULL-terminated here) and the histogram/extrema
              workspace on this path.
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Fix: release the histogram/extrema workspace (leaked by the
            original on this path).
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster; accumulate per-channel sums so the
    centers can be averaged below.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): after the reset below, `count` is the number of clusters
    kept so far, not the pixel total accumulated above, so the threshold
    grows with each kept cluster -- looks suspicious; confirm against the
    intent ("percentage" of pixels) before changing behavior.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: assign its id and average its channel centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as the
    background; with a single cluster both are the head.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The dynamic threshold is the midpoint between the object and
        background centers, per channel, rounded to the nearest char scale.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    bin,
    x;

  ssize_t
    y;

  /*
    Zero the 256 bins of each color component's histogram.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Accumulate one count per pixel into the bin addressed by each channel's
    char-scaled intensity; rows that cannot be read abort the scan.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless (leaf) node of the subtree to list, visiting the
    node itself, then its sibling subtree, then its child subtree.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  /*
    A node's mean stability is the average of its children's stabilities;
    leaves get 0.
  */
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *child;

      double
        sum;

      ssize_t
        count;

      sum=0.0;
      count=0;
      for (child=node->child; child != (IntervalTree *) NULL;
           child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(double) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  /*
    Stability is the tau gap between a node and its first child; leaves are
    perfectly stable (0).
  */
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate the scratch list used to gather the current leaf nodes.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
    Fix: check the allocation; the original dereferenced a possibly-NULL
    root and leaked the list on failure.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  if (root == (IntervalTree *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return((IntervalTree *) NULL);
    }
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split each leaf interval at the zero crossings of the next-finer
      scale (i+1).
      NOTE(review): the node allocations below are unchecked, as in the
      original; a failure here dereferences NULL -- confirm against any
      upstream hardening before tightening.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              First split becomes the child; later splits chain as siblings.
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        If any split occurred, add the trailing sub-interval up to the
        parent's right edge.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a node's tau and its child's.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  /*
    An unstable node is skipped: descend to its siblings and children
    looking for stable ones.
  */
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  /*
    A stable node is collected and shadows its entire child subtree; only
    siblings are still examined.
  */
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Post-order release: free the sibling and child subtrees before the node
    itself.
  */
  if (node != (IntervalTree *) NULL)
    {
      FreeNodes(node->sibling);
      FreeNodes(node->child);
      node=(IntervalTree *) RelinquishMagickMemory(node);
    }
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  double
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree node list.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one extra for
    the unsmoothed histogram appended below.
    Fix: the original leaked `list` when this allocation failed.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list: smooth the histogram at each tau and
    record where its second derivative changes sign.
  */
  derivative=(double *) AcquireQuantumMemory(256,sizeof(*derivative));
  second_derivative=(double *) AcquireQuantumMemory(256,
    sizeof(*second_derivative));
  if ((derivative == (double *) NULL) ||
      (second_derivative == (double *) NULL))
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateDerivatives");
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original (unsmoothed) histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(double) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(double *) RelinquishMagickMemory(derivative);
  second_derivative=(double *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
    Fix: the original leaked both work buffers when tree construction
    failed.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak: a -1 crossing at the right edge marks a
      peak (search the maximum), otherwise search the minimum.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Stamp the region with the (sign-coded) extremum position; index 0 is
      encoded as 256 so the sign carries through.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
    Fix: guard the division so an empty active-node list yields 0.0 instead
    of NaN.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  if (number_nodes > 0)
    average_tau/=(double) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    sum;

  register ssize_t
    u,
    x;

  /*
    Convolve the histogram with a Gaussian of standard deviation tau.  The
    kernel table is indexed by |x-u| and truncated once values drop below
    MagickEpsilon (remaining entries stay zero).
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) beta*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    dimension;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Acquire one 256-bin histogram and one extrema map per color dimension.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (dimension=0; dimension < MaxDimension; dimension++)
  {
    histogram[dimension]=(ssize_t *) AcquireQuantumMemory(256,
      sizeof(**histogram));
    extrema[dimension]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[dimension] == (ssize_t *) NULL) ||
        (extrema[dimension] == (short *) NULL))
      {
        /*
          Unwind the earlier acquisitions, then report the failure.
        */
        while (--dimension >= 0)
        {
          extrema[dimension]=(short *) RelinquishMagickMemory(
            extrema[dimension]);
          histogram[dimension]=(ssize_t *) RelinquishMagickMemory(
            histogram[dimension]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Build the channel histograms in the requested colorspace and derive
    per-channel extrema via scale-space filtering.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify with the fuzzy c-means technique, then restore the original
    colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (dimension=0; dimension < MaxDimension; dimension++)
  {
    extrema[dimension]=(short *) RelinquishMagickMemory(extrema[dimension]);
    histogram[dimension]=(ssize_t *) RelinquishMagickMemory(
      histogram[dimension]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
%  directions as: 1 is negative to positive; 0 is no crossing; and -1
%  is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.  Note the half-open
    test: values equal to -smooth_threshold are zeroed, values equal to
    +smooth_threshold are kept.  second_derivative is modified in place.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: parity tracks the sign of the last non-zero sample
    (+1 positive, -1 negative), so a negative sample after a positive run
    marks a positive-to-negative crossing (-1), and vice versa (+1).
    Bug fix: the parity updates were swapped (the negative branch recorded
    parity=1 and the positive branch parity=-1), which marked a "crossing"
    at every repetition of the same sign and never at an actual sign
    change, contradicting the documented -1/0/1 semantics.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=(-1);
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=1;
        }
  }
}
|
par_rap.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBuildCoarseOperator
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix *RT,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParCSRMatrix *P,
                                    hypre_ParCSRMatrix **RAP_ptr )
{
   /* Convenience wrapper: delegates to the KT variant with
      keepTranspose = 0, i.e. the transpose of RT built during the triple
      product is not retained.  RAP_ptr receives the coarse-grid operator;
      errors are reported through hypre_error_flag. */
   hypre_BoomerAMGBuildCoarseOperatorKT( RT, A, P, 0, RAP_ptr);
   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix *RT,
hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
HYPRE_Int keepTranspose,
hypre_ParCSRMatrix **RAP_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT);
hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT);
HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag);
HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd);
HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd);
hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
HYPRE_Int num_recvs_RT = 0;
HYPRE_Int num_sends_RT = 0;
HYPRE_Int *send_map_starts_RT;
HYPRE_Int *send_map_elmts_RT;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
HYPRE_BigInt *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_BigInt first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P);
HYPRE_BigInt last_col_diag_P;
HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag);
HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd);
HYPRE_BigInt *coarse_partitioning = hypre_ParCSRMatrixColStarts(P);
HYPRE_BigInt *RT_partitioning = hypre_ParCSRMatrixColStarts(RT);
hypre_ParCSRMatrix *RAP;
HYPRE_BigInt *col_map_offd_RAP = NULL;
HYPRE_BigInt *new_col_map_offd_RAP = NULL;
hypre_CSRMatrix *RAP_int = NULL;
HYPRE_Real *RAP_int_data;
HYPRE_Int *RAP_int_i;
HYPRE_BigInt *RAP_int_j;
hypre_CSRMatrix *RAP_ext;
HYPRE_Real *RAP_ext_data = NULL;
HYPRE_Int *RAP_ext_i = NULL;
HYPRE_BigInt *RAP_ext_j = NULL;
hypre_CSRMatrix *RAP_diag;
HYPRE_Real *RAP_diag_data;
HYPRE_Int *RAP_diag_i;
HYPRE_Int *RAP_diag_j;
hypre_CSRMatrix *RAP_offd;
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_i = NULL;
HYPRE_Int *RAP_offd_j = NULL;
HYPRE_Int RAP_size;
HYPRE_Int RAP_ext_size;
HYPRE_Int RAP_diag_size;
HYPRE_Int RAP_offd_size;
HYPRE_Int P_ext_diag_size;
HYPRE_Int P_ext_offd_size;
HYPRE_BigInt first_col_diag_RAP;
HYPRE_BigInt last_col_diag_RAP;
HYPRE_Int num_cols_offd_RAP = 0;
hypre_CSRMatrix *R_diag;
HYPRE_Real *R_diag_data;
HYPRE_Int *R_diag_i;
HYPRE_Int *R_diag_j;
hypre_CSRMatrix *R_offd;
HYPRE_Real *R_offd_data;
HYPRE_Int *R_offd_i;
HYPRE_Int *R_offd_j;
HYPRE_Real *RA_diag_data_array = NULL;
HYPRE_Int *RA_diag_j_array = NULL;
HYPRE_Real *RA_offd_data_array = NULL;
HYPRE_Int *RA_offd_j_array = NULL;
hypre_CSRMatrix *Ps_ext;
HYPRE_Real *Ps_ext_data;
HYPRE_Int *Ps_ext_i;
HYPRE_BigInt *Ps_ext_j;
HYPRE_Real *P_ext_diag_data = NULL;
HYPRE_Int *P_ext_diag_i = NULL;
HYPRE_Int *P_ext_diag_j = NULL;
HYPRE_Real *P_ext_offd_data = NULL;
HYPRE_Int *P_ext_offd_i = NULL;
HYPRE_Int *P_ext_offd_j = NULL;
HYPRE_BigInt *P_big_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pext;
HYPRE_Int *map_P_to_Pext = NULL;
HYPRE_Int *map_P_to_RAP = NULL;
HYPRE_Int *map_Pext_to_RAP = NULL;
HYPRE_Int *P_marker;
HYPRE_Int **P_mark_array;
HYPRE_Int **A_mark_array;
HYPRE_Int *A_marker;
HYPRE_BigInt *temp;
HYPRE_BigInt n_coarse, n_coarse_RT;
HYPRE_Int square = 1;
HYPRE_Int num_cols_offd_Pext = 0;
HYPRE_Int ic, i, j, k;
HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest;
HYPRE_Int cnt = 0; /*value; */
HYPRE_Int jj1, jj2, jj3, jcol;
HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd;
HYPRE_Int jj_counter, jj_count_diag, jj_count_offd;
HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd;
HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */
HYPRE_Int num_nz_cols_A;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Real r_entry;
HYPRE_Real r_a_product;
HYPRE_Real r_a_p_product;
HYPRE_Real zero = 0.0;
HYPRE_Int *prefix_sum_workspace;
/*-----------------------------------------------------------------------
* Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access
* to restriction .
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm, &num_procs);
num_threads = hypre_NumThreads();
if (comm_pkg_RT)
{
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
else if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(RT);
comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
hypre_CSRMatrixTranspose(RT_diag, &R_diag, 1);
if (num_cols_offd_RT)
{
hypre_CSRMatrixTranspose(RT_offd, &R_offd, 1);
R_offd_data = hypre_CSRMatrixData(R_offd);
R_offd_i = hypre_CSRMatrixI(R_offd);
R_offd_j = hypre_CSRMatrixJ(R_offd);
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for R. Also get sizes of fine and
* coarse grids.
*-----------------------------------------------------------------------*/
R_diag_data = hypre_CSRMatrixData(R_diag);
R_diag_i = hypre_CSRMatrixI(R_diag);
R_diag_j = hypre_CSRMatrixJ(R_diag);
n_coarse = hypre_ParCSRMatrixGlobalNumCols(P);
num_nz_cols_A = num_cols_diag_A + num_cols_offd_A;
n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT);
if (n_coarse != n_coarse_RT)
{
square = 0;
}
/*-----------------------------------------------------------------------
* Generate Ps_ext, i.e. portion of P that is stored on neighbor procs
* and needed locally for triple matrix product
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedIntMap send_map_elmts_RT_inverse_map;
HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL;
HYPRE_Int *send_map_elmts_RT_aggregated = NULL;
HYPRE_Int send_map_elmts_RT_inverse_map_initialized =
num_sends_RT > 0 && send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0;
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntSet send_map_elmts_set;
hypre_UnorderedIntSetCreate(&send_map_elmts_set,
2 * (send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16 * hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int key = send_map_elmts_RT[i];
hypre_UnorderedIntSetPut(&send_map_elmts_set, key);
}
HYPRE_Int send_map_elmts_unique_size;
HYPRE_Int *send_map_elmts_unique = hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set,
&send_map_elmts_unique_size);
hypre_UnorderedIntSetDestroy(&send_map_elmts_set);
hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map, 2 * send_map_elmts_unique_size,
16 * hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i);
}
hypre_TFree(send_map_elmts_unique, HYPRE_MEMORY_HOST);
send_map_elmts_starts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1,
HYPRE_MEMORY_HOST);
send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT],
HYPRE_MEMORY_HOST);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
send_map_elmts_starts_RT_aggregated[i] = 0;
}
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
#pragma omp atomic
send_map_elmts_starts_RT_aggregated[idx]++;
}
for (i = 0; i < send_map_elmts_unique_size - 1; i++)
{
send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i];
}
send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT];
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;
send_map_elmts_RT_aggregated[offset] = i;
}
}
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
Ps_ext = hypre_ParCSRMatrixExtractBExt(P, A, 1);
Ps_ext_data = hypre_CSRMatrixData(Ps_ext);
Ps_ext_i = hypre_CSRMatrixI(Ps_ext);
Ps_ext_j = hypre_CSRMatrixBigJ(Ps_ext);
}
P_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST);
P_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST);
P_ext_diag_i[0] = 0;
P_ext_offd_i[0] = 0;
P_ext_diag_size = 0;
P_ext_offd_size = 0;
last_col_diag_P = first_col_diag_P + (HYPRE_BigInt) num_cols_diag_P - 1;
/*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2 * (num_threads + 1), HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j)
#endif /* This threading causes problem, maybe the prefix_sum in combination with BigInt? */
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A);
HYPRE_Int P_ext_diag_size_private = 0;
HYPRE_Int P_ext_offd_size_private = 0;
for (i = i_begin; i < i_end; i++)
{
for (j = Ps_ext_i[i]; j < Ps_ext_i[i + 1]; j++)
if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P)
{
P_ext_offd_size_private++;
}
else
{
P_ext_diag_size_private++;
}
}
hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size, &P_ext_offd_size_private,
&P_ext_offd_size, prefix_sum_workspace);
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
if (P_ext_diag_size)
{
P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size, HYPRE_MEMORY_HOST);
P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (P_ext_offd_size)
{
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);
P_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size, HYPRE_MEMORY_HOST);
P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size, HYPRE_MEMORY_HOST);
//temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
for (j = Ps_ext_i[i]; j < Ps_ext_i[i + 1]; j++)
{
HYPRE_BigInt value = Ps_ext_j[j];
if (value < first_col_diag_P || value > last_col_diag_P)
{
//Ps_ext_j[P_ext_offd_size_private] = value;
//temp[P_ext_offd_size_private] = value;
P_big_offd_j[P_ext_offd_size_private] = value;
P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j];
}
else
{
P_ext_diag_j[P_ext_diag_size_private] = (HYPRE_Int)(Ps_ext_j[j] - first_col_diag_P);
P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j];
}
}
P_ext_diag_i[i + 1] = P_ext_diag_size_private;
P_ext_offd_i[i + 1] = P_ext_offd_size_private;
}
} /* omp parallel */
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (P_ext_offd_size || num_cols_offd_P)
{
hypre_UnorderedBigIntSet found_set;
hypre_UnorderedBigIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P,
16 * hypre_NumThreads());
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < P_ext_offd_size; i++)
{
//hypre_UnorderedBigIntSetPut(&found_set, Ps_ext_j[i]);
hypre_UnorderedBigIntSetPut(&found_set, P_big_offd_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_P; i++)
{
hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_P[i]);
}
} /* omp parallel */
/* Warning on getting temp right !!!!! */
temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_Pext);
hypre_UnorderedBigIntSetDestroy(&found_set);
hypre_UnorderedBigIntMap col_map_offd_Pext_inverse;
hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext,
&col_map_offd_Pext_inverse);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0 ; i < P_ext_offd_size; i++)
//Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]);
{
P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]);
}
if (num_cols_offd_Pext) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_Pext_inverse); }
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size || num_cols_offd_P)
{
temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size + num_cols_offd_P, HYPRE_MEMORY_HOST);
for (i = 0; i < P_ext_offd_size; i++)
//Ps_ext_j[i] = temp[i];
//temp[i] = Ps_ext_j[i];
{
temp[i] = P_big_offd_j[i];
}
cnt = P_ext_offd_size;
for (i = 0; i < num_cols_offd_P; i++)
{
temp[cnt++] = col_map_offd_P[i];
}
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt - 1);
num_cols_offd_Pext = 1;
HYPRE_BigInt value = temp[0];
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Pext++] = value;
}
}
}
if (num_cols_offd_Pext)
{
col_map_offd_Pext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Pext, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_Pext; i++)
{
col_map_offd_Pext[i] = temp[i];
}
if (P_ext_offd_size || num_cols_offd_P)
{
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
/*if (P_ext_offd_size)
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);*/
for (i = 0 ; i < P_ext_offd_size; i++)
P_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Pext,
//Ps_ext_j[i],
P_big_offd_j[i],
num_cols_offd_Pext);
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size)
{
hypre_TFree(P_big_offd_j, HYPRE_MEMORY_HOST);
}
/*if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}*/
if (num_cols_offd_P)
{
map_P_to_Pext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_Pext; i++)
if (col_map_offd_Pext[i] == col_map_offd_P[cnt])
{
map_P_to_Pext[cnt++] = i;
if (cnt == num_cols_offd_P) { break; }
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/*-----------------------------------------------------------------------
* First Pass: Determine size of RAP_int and set up RAP_int_i if there
* are more than one processor and nonzero elements in R_offd
*-----------------------------------------------------------------------*/
P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
if (num_cols_offd_RT)
{
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT / num_threads;
rest = num_cols_offd_RT - size * num_threads;
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
/*-----------------------------------------------------------------------
* Allocate marker arrays.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
{
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P + num_cols_offd_Pext,
HYPRE_MEMORY_HOST);
P_marker = P_mark_array[ii];
}
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);
A_marker = A_mark_array[ii];
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
for (ic = 0; ic < num_cols_diag_P + num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic + 1]; jj1++)
{
i1 = R_offd_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2 + num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2 + num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
}
}
jj_count[ii] = jj_counter;
}
/*-----------------------------------------------------------------------
* Allocate RAP_int_data and RAP_int_j arrays.
*-----------------------------------------------------------------------*/
for (i = 0; i < num_threads - 1; i++)
{
jj_count[i + 1] += jj_count[i];
}
RAP_size = jj_count[num_threads - 1];
RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT + 1, HYPRE_MEMORY_HOST);
RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size, HYPRE_MEMORY_HOST);
RAP_int_j = hypre_CTAlloc(HYPRE_BigInt, RAP_size, HYPRE_MEMORY_HOST);
RAP_int_i[num_cols_offd_RT] = RAP_size;
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_int_data and RAP_int_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT / num_threads;
rest = num_cols_offd_RT - size * num_threads;
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
{
P_marker = P_mark_array[ii];
}
A_marker = A_mark_array[ii];
jj_counter = start_indexing;
if (ii > 0) { jj_counter = jj_count[ii - 1]; }
for (ic = 0; ic < num_cols_diag_P + num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
RAP_int_i[ic] = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic + 1]; jj1++)
{
i1 = R_offd_j[jj1];
r_entry = R_offd_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)
{
i2 = A_offd_j[jj2];
r_a_product = r_entry * A_offd_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter]
= col_map_offd_Pext[i3 - num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[12]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)
{
i2 = A_diag_j[jj2];
r_a_product = r_entry * A_diag_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2 + num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2 + num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] =
col_map_offd_Pext[i3 - num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[12]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
}
}
if (num_cols_offd_Pext || num_cols_diag_P)
{
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
}
hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
}
RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT, num_rows_offd_RT, RAP_size);
hypre_CSRMatrixMemoryLocation(RAP_int) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(RAP_int) = RAP_int_i;
hypre_CSRMatrixBigJ(RAP_int) = RAP_int_j;
hypre_CSRMatrixData(RAP_int) = RAP_int_data;
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
RAP_ext_size = 0;
if (num_sends_RT || num_recvs_RT)
{
void *request;
hypre_ExchangeExternalRowsInit(RAP_int, comm_pkg_RT, &request);
RAP_ext = hypre_ExchangeExternalRowsWait(request);
RAP_ext_i = hypre_CSRMatrixI(RAP_ext);
RAP_ext_j = hypre_CSRMatrixBigJ(RAP_ext);
RAP_ext_data = hypre_CSRMatrixData(RAP_ext);
RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)];
}
if (num_cols_offd_RT)
{
hypre_CSRMatrixDestroy(RAP_int);
RAP_int = NULL;
}
RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT + 1, HYPRE_MEMORY_DEVICE);
RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT + 1, HYPRE_MEMORY_DEVICE);
first_col_diag_RAP = first_col_diag_P;
last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1;
/*-----------------------------------------------------------------------
* check for new nonzero columns in RAP_offd generated through RAP_ext
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedBigIntMap col_map_offd_RAP_inverse;
if (RAP_ext_size || num_cols_offd_Pext)
{
hypre_UnorderedBigIntSet found_set;
hypre_UnorderedBigIntSetCreate(&found_set, 2 * (RAP_ext_size + num_cols_offd_Pext),
16 * hypre_NumThreads());
cnt = 0;
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < RAP_ext_size; i++)
{
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
{
hypre_UnorderedBigIntSetPut(&found_set, RAP_ext_j[i]);
}
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_Pext; i++)
{
hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_Pext[i]);
}
} /* omp parallel */
temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_RAP);
hypre_UnorderedBigIntSetDestroy(&found_set);
hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP,
&col_map_offd_RAP_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (RAP_ext_size || num_cols_offd_Pext)
{
temp = hypre_CTAlloc(HYPRE_BigInt, RAP_ext_size + num_cols_offd_Pext, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
{
temp[cnt++] = RAP_ext_j[i];
}
for (i = 0; i < num_cols_offd_Pext; i++)
{
temp[cnt++] = col_map_offd_Pext[i];
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt - 1);
HYPRE_BigInt value = temp[0];
num_cols_offd_RAP = 1;
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_RAP++] = value;
}
}
}
/* now evaluate col_map_offd_RAP */
if (num_cols_offd_RAP)
{
col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
}
for (i = 0 ; i < num_cols_offd_RAP; i++)
{
col_map_offd_RAP[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (num_cols_offd_P)
{
map_P_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_P[cnt])
{
map_P_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_P) { break; }
}
}
if (num_cols_offd_Pext)
{
map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_Pext, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt])
{
map_Pext_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_Pext) { break; }
}
}
/*-----------------------------------------------------------------------
* Convert RAP_ext column indices
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
+ (HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]);
#else
+(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i], num_cols_offd_RAP);
#endif
else
{
RAP_ext_j[i] -= first_col_diag_RAP;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (num_cols_offd_RAP)
{
hypre_UnorderedBigIntMapDestroy(&col_map_offd_RAP_inverse);
}
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/* need to allocate new P_marker etc. and make further changes */
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT / num_threads;
rest = num_cols_diag_RT - size * num_threads;
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P + num_cols_offd_RAP,
HYPRE_MEMORY_HOST);
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
for (ic = 0; ic < num_cols_diag_P + num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Set marker for diagonal entry, RAP_{ic,ic}. and for all points
* being added to row ic of RAP_diag and RAP_offd through RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (square)
{
P_marker[ic] = jj_count_diag++;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1];
j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k = RAP_ext_i[jj]; k < RAP_ext_i[jj + 1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i = 0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i + 1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k = RAP_ext_i[j]; k < RAP_ext_i[j + 1]; k++)
{
jcol = (HYPRE_Int) RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic + 1]; jj1++)
{
i1 = R_diag_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2 + 1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2 + 1]; jj3++)
{
i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2 + num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2 + num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2 + 1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
if (num_cols_offd_P)
{
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2 + 1]; jj3++)
{
i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
}
/*--------------------------------------------------------------------
* Set RAP_diag_i and RAP_offd_i for this row.
*--------------------------------------------------------------------*/
/*
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
*/
}
jj_cnt_diag[ii] = jj_count_diag;
jj_cnt_offd[ii] = jj_count_offd;
}
for (i = 0; i < num_threads - 1; i++)
{
jj_cnt_diag[i + 1] += jj_cnt_diag[i];
jj_cnt_offd[i + 1] += jj_cnt_offd[i];
}
jj_count_diag = jj_cnt_diag[num_threads - 1];
jj_count_offd = jj_cnt_offd[num_threads - 1];
RAP_diag_i[num_cols_diag_RT] = jj_count_diag;
RAP_offd_i[num_cols_diag_RT] = jj_count_offd;
/*-----------------------------------------------------------------------
* Allocate RAP_diag_data and RAP_diag_j arrays.
* Allocate RAP_offd_data and RAP_offd_j arrays.
*-----------------------------------------------------------------------*/
RAP_diag_size = jj_count_diag;
if (RAP_diag_size)
{
RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size, HYPRE_MEMORY_DEVICE);
RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size, HYPRE_MEMORY_DEVICE);
}
RAP_offd_size = jj_count_offd;
if (RAP_offd_size)
{
RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size, HYPRE_MEMORY_DEVICE);
RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size, HYPRE_MEMORY_DEVICE);
}
if (RAP_offd_size == 0 && num_cols_offd_RAP != 0)
{
num_cols_offd_RAP = 0;
hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
}
RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A * num_threads, HYPRE_MEMORY_HOST);
RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A * num_threads, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A * num_threads, HYPRE_MEMORY_HOST);
RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A * num_threads, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_diag_data and RAP_diag_j.
* Second Pass: Fill in RAP_offd_data and RAP_offd_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT / num_threads;
rest = num_cols_diag_RT - size * num_threads;
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
for (ic = 0; ic < num_cols_diag_P + num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A ; i++)
{
A_marker[i] = -1;
}
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
if (ii > 0)
{
jj_count_diag = jj_cnt_diag[ii - 1];
jj_count_offd = jj_cnt_offd[ii - 1];
}
// temporal matrix RA = R*A
// only need to store one row per thread because R*A and (R*A)*P are fused
// into one loop.
hypre_CSRMatrix RA_diag, RA_offd;
RA_diag.data = RA_diag_data_array + num_cols_diag_A * ii;
RA_diag.j = RA_diag_j_array + num_cols_diag_A * ii;
RA_diag.num_nonzeros = 0;
RA_offd.num_nonzeros = 0;
if (num_cols_offd_A)
{
RA_offd.data = RA_offd_data_array + num_cols_offd_A * ii;
RA_offd.j = RA_offd_j_array + num_cols_offd_A * ii;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros;
HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros;
if (square)
{
P_marker[ic] = jj_count_diag;
RAP_diag_data[jj_count_diag] = zero;
RAP_diag_j[jj_count_diag] = ic;
jj_count_diag++;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1];
j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k = RAP_ext_i[jj]; k < RAP_ext_i[jj + 1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol - num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i = 0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i + 1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k = RAP_ext_i[j]; k < RAP_ext_i[j + 1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol - num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag and compute row ic of RA.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic + 1]; jj1++)
{
i1 = R_diag_j[jj1];
r_entry = R_diag_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1 + 1]; jj2++)
{
i2 = A_offd_j[jj2];
HYPRE_Real a_entry = A_offd_data[jj2];
HYPRE_Int marker = A_marker[i2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_offd)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = RA_offd.num_nonzeros;
RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;
RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;
RA_offd.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;
// JSP: compiler will more likely to generate FMA instructions
// when we don't eliminate common subexpressions of
// r_entry * A_offd_data[jj2] manually.
}
} // loop over entries in row i1 of A_offd
} // num_cols_offd_A
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1 + 1]; jj2++)
{
i2 = A_diag_j[jj2];
HYPRE_Real a_entry = A_diag_data[jj2];
HYPRE_Int marker = A_marker[i2 + num_cols_offd_A];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_diag)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2 + num_cols_offd_A] = RA_diag.num_nonzeros;
RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;
RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;
RA_diag.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;
}
} // loop over entries in row i1 of A_diag
} // loop over entries in row ic of R_diag
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_offd.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)
{
i1 = RA_offd.j[jj1 - ra_row_begin_offd];
r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];
/*-----------------------------------------------------------
* Loop over entries in row i1 of P_ext.
*-----------------------------------------------------------*/
for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1 + 1]; jj2++)
{
i2 = P_ext_diag_j[jj2];
HYPRE_Real p_entry = P_ext_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
{
RAP_diag_data[marker] += r_a_product * p_entry;
}
}
for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1 + 1]; jj2++)
{
i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_ext_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
{
RAP_offd_data[marker] += r_a_product * p_entry;
}
}
} // loop over entries in row ic of RA_offd
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_diag.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++)
{
HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag];
HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of P_diag.
*-----------------------------------------------------------------*/
for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1 + 1]; jj2++)
{
i2 = P_diag_j[jj2];
HYPRE_Real p_entry = P_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
{
RAP_diag_data[marker] += r_a_product * p_entry;
}
}
if (num_cols_offd_P)
{
for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1 + 1]; jj2++)
{
i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
{
RAP_offd_data[marker] += r_a_product * p_entry;
}
}
} // num_cols_offd_P
} // loop over entries in row ic of RA_diag.
} // Loop over interior c-points.
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
} // omp parallel for
/* check if really all off-diagonal entries occurring in col_map_offd_RAP
are represented and eliminate if necessary */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols_offd_RAP; i++)
{
P_marker[i] = -1;
}
jj_count_offd = 0;
#ifdef HYPRE_USING_ATOMIC
#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
#ifdef HYPRE_USING_ATOMIC
if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)
{
jj_count_offd++;
}
#else
if (P_marker[i3])
{
P_marker[i3] = 0;
jj_count_offd++;
}
#endif
}
if (jj_count_offd < num_cols_offd_RAP)
{
new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
jj_counter = 0;
for (i = 0; i < num_cols_offd_RAP; i++)
if (!P_marker[i])
{
P_marker[i] = jj_counter;
new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
RAP_offd_j[i] = P_marker[i3];
}
num_cols_offd_RAP = jj_count_offd;
hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
col_map_offd_RAP = new_col_map_offd_RAP;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse,
RT_partitioning, coarse_partitioning,
num_cols_offd_RAP, RAP_diag_size,
RAP_offd_size);
RAP_diag = hypre_ParCSRMatrixDiag(RAP);
hypre_CSRMatrixI(RAP_diag) = RAP_diag_i;
if (RAP_diag_size)
{
hypre_CSRMatrixData(RAP_diag) = RAP_diag_data;
hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j;
}
RAP_offd = hypre_ParCSRMatrixOffd(RAP);
hypre_CSRMatrixI(RAP_offd) = RAP_offd_i;
if (num_cols_offd_RAP)
{
hypre_CSRMatrixData(RAP_offd) = RAP_offd_data;
hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j;
hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP;
}
if (num_procs > 1)
{
/* hypre_GenerateRAPCommPkg(RAP, A); */
hypre_MatvecCommPkgCreate(RAP);
}
*RAP_ptr = RAP;
/*-----------------------------------------------------------------------
* Free R, P_ext and marker arrays.
*-----------------------------------------------------------------------*/
if (keepTranspose)
{
hypre_ParCSRMatrixDiagT(RT) = R_diag;
}
else
{
hypre_CSRMatrixDestroy(R_diag);
}
R_diag = NULL;
if (num_cols_offd_RT)
{
if (keepTranspose)
{
hypre_ParCSRMatrixOffdT(RT) = R_offd;
}
else
{
hypre_CSRMatrixDestroy(R_offd);
}
R_offd = NULL;
}
if (num_sends_RT || num_recvs_RT)
{
hypre_CSRMatrixDestroy(RAP_ext);
RAP_ext = NULL;
}
hypre_TFree(P_mark_array, HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(jj_cnt_diag, HYPRE_MEMORY_HOST);
hypre_TFree(jj_cnt_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd_P)
{
hypre_TFree(map_P_to_Pext, HYPRE_MEMORY_HOST);
hypre_TFree(map_P_to_RAP, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Pext)
{
hypre_TFree(col_map_offd_Pext, HYPRE_MEMORY_HOST);
hypre_TFree(map_Pext_to_RAP, HYPRE_MEMORY_HOST);
}
if (P_ext_diag_size)
{
hypre_TFree(P_ext_diag_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_diag_j, HYPRE_MEMORY_HOST);
}
if (P_ext_offd_size)
{
hypre_TFree(P_ext_offd_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_offd_j, HYPRE_MEMORY_HOST);
}
hypre_TFree(RA_diag_data_array, HYPRE_MEMORY_HOST);
hypre_TFree(RA_diag_j_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
hypre_TFree(RA_offd_data_array, HYPRE_MEMORY_HOST);
hypre_TFree(RA_offd_j_array, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map);
}
hypre_TFree(send_map_elmts_starts_RT_aggregated, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_elmts_RT_aggregated, HYPRE_MEMORY_HOST);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime();
#endif
return (0);
}
|
if-clause-Modificado.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char **argv)
{
    enum { N_MAX = 20 };              /* capacity of the data array */
    int i, n, tid, num_threads;
    int a[N_MAX], suma = 0, sumalocal;

    /* Require both the iteration count and the thread count on the command line. */
    if (argc < 3) {
        fprintf(stderr, "[ERROR]-Falta iteraciones\n");
        exit(-1);
    }

    /* Clamp n into [0, N_MAX] so every a[i] access below stays in bounds
       (a negative atoi() result would otherwise be accepted silently). */
    n = atoi(argv[1]);
    if (n > N_MAX) { n = N_MAX; }
    if (n < 0)     { n = 0; }

    /* The OpenMP num_threads clause requires a positive value; a zero or
       negative argument would be non-conforming, so fall back to 1. */
    num_threads = atoi(argv[2]);
    if (num_threads < 1) { num_threads = 1; }

    for (i = 0; i < n; i++) {
        a[i] = i;
    }

    /* The region only runs in parallel when n > 4 (if clause); otherwise it
       executes with a single thread. */
    #pragma omp parallel if(n>4) default(none) \
            private(sumalocal,tid) shared(a,suma,n) num_threads(num_threads)
    {
        sumalocal = 0;
        tid = omp_get_thread_num();
        #pragma omp for private(i) schedule(static) nowait
        for (i = 0; i < n; i++)
        {
            sumalocal += a[i];
            printf(" thread %d suma de a[%d]=%d sumalocal=%d \n",
                   tid, i, a[i], sumalocal);
        }
        /* Combine the per-thread partial sums without a data race. */
        #pragma omp atomic
        suma += sumalocal;
        /* Make sure every thread has contributed before master prints. */
        #pragma omp barrier
        #pragma omp master
        printf("thread master=%d imprime suma=%d\n", tid, suma);
    }
    return 0;
}
|
declare-variant-5.c | /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
/* { dg-additional-options "-mavx2" } */
typedef float __v4sf __attribute__((vector_size (16)));
typedef int __v4si __attribute__((vector_size (16)));
typedef float __v8sf __attribute__((vector_size (32)));
typedef int __v8si __attribute__((vector_size (32)));
__v4si f1 (__v4sf, __v4sf, float *);
__v8si f2 (__v8sf, __v8sf, float *);
__v4si f3 (__v4si, int, __v4si);
#pragma omp declare variant (f1) match (construct={parallel,for,simd(simdlen(4),notinbranch,uniform(z),aligned(z:4 * sizeof (*z)))})
#pragma omp declare variant (f2) match (construct={for,simd(uniform(z),simdlen(8),notinbranch)})
int f4 (float x, float y, float *z);
#pragma omp declare variant (f3) match (construct={simd(simdlen(4),inbranch,linear(y:1))})
int f5 (int x, int y);
/* Driver for the "#pragma omp declare variant" directives above: each call to
   f4/f5 sits inside a different OpenMP construct so that the compiler can (or
   cannot) substitute the declared variants f1, f2 and f3.  This is a
   compile-only test; the loop bodies are never meant to be executed here. */
void
test (int *x, float *y, float *z, float *w)
{
/* parallel + for simd, w aligned to 16 bytes: matches the f1 context
   (construct={parallel,for,simd(simdlen(4),notinbranch,...)}). */
#pragma omp parallel
#pragma omp for simd aligned (w:4 * sizeof (float))
for (int i = 0; i < 1024; i++)
x[i] = f4 (y[i], z[i], w);
/* combined parallel for simd with an explicit simdlen(4). */
#pragma omp parallel for simd aligned (w:4 * sizeof (float)) simdlen(4)
for (int i = 1024; i < 2048; i++)
x[i] = f4 (y[i], z[i], w);
/* plain simd without an enclosing for: candidate for the f2 selector
   (construct={for,simd(...)}) should NOT match here. */
#pragma omp simd aligned (w:4 * sizeof (float))
for (int i = 2048; i < 4096; i++)
x[i] = f4 (y[i], z[i], w);
/* call under a condition inside a simd loop: exercises the inbranch
   variant f3 declared for f5. */
#pragma omp simd
for (int i = 4096; i < 8192; i++)
if (x[i] > 10)
x[i] = f5 (x[i], i);
}
|
displacement_residual_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementResidualContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* This class implements a convergence control based on nodal displacement (for penalty contact)
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementResidualContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementResidualContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementResidualContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param RotRatioTolerance Relative tolerance for rotation residual error
* @param RotAbsTolerance Absolute tolerance for rotation residual error
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementResidualContactCriteria(
    const TDataType DispRatioTolerance,
    const TDataType DispAbsTolerance,
    const TDataType RotRatioTolerance,
    const TDataType RotAbsTolerance,
    const bool PrintingOutput = false
    )
    : BaseType(),
      mDispRatioTolerance(DispRatioTolerance),
      mDispAbsTolerance(DispAbsTolerance),
      mRotRatioTolerance(RotRatioTolerance),
      mRotAbsTolerance(RotAbsTolerance)
{
    // Configure the behaviour flags: only the printing mode comes from the
    // caller; everything else starts in its "not yet happened" state.
    mOptions.Set(DisplacementResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput);
    mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
    : BaseType()
{
    // Merge the user-provided settings with the class defaults, then apply
    // the validated parameter set to this criteria instance.
    this->AssignSettings(this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()));
}
/// Copy constructor: copies the base criteria state, the local flags and
/// every tolerance/norm member.
DisplacementResidualContactCriteria( DisplacementResidualContactCriteria const& rOther )
    :BaseType(rOther)
    ,mOptions(rOther.mOptions)
    ,mDispRatioTolerance(rOther.mDispRatioTolerance)
    ,mDispAbsTolerance(rOther.mDispAbsTolerance)
    ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
    ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
    ,mRotRatioTolerance(rOther.mRotRatioTolerance)
    ,mRotAbsTolerance(rOther.mRotAbsTolerance)
    ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm)
    ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm)
{
}
/// Destructor.
~DisplacementResidualContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
/**
 * @brief Compute relative and absolute residual errors and check convergence.
 * @details Accumulates the squared L2 norm of the RHS over the free
 * displacement DoFs (and, if present, the rotation DoFs) and compares the
 * ratio w.r.t. the first residual of the step and the per-DoF average
 * against the configured tolerances.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables) (unused)
 * @param rb RHS vector (residual)
 * @return true if convergence is achieved, false otherwise
 */
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
        // Accumulators: squared residual norms and free-DoF counters
        TDataType disp_residual_solution_norm = 0.0;
        IndexType disp_dof_num(0);
        TDataType rot_residual_solution_norm = 0.0;
        IndexType rot_dof_num(0);
        // First iterator
        const auto it_dof_begin = rDofSet.begin();
        // Auxiliar displacement DoF check: when no rotation DoFs exist every
        // free DoF is counted as displacement, otherwise displacement
        // components are discriminated explicitly
        const std::function<bool(const VariableData&)> check_without_rot =
            [](const VariableData& rCurrVar) -> bool {return true;};
        const std::function<bool(const VariableData&)> check_with_rot =
            [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
        const auto* p_check_disp = (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;
        // Loop over DoFs.
        // FIX: dof_id and residual_dof_value are loop-local now; they used to
        // be declared outside the loop and listed in the reduction clause,
        // which privatized them but accumulated meaningless sums at the end.
        #pragma omp parallel for reduction(+:disp_residual_solution_norm,disp_dof_num,rot_residual_solution_norm,rot_dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = it_dof_begin + i;
            if (it_dof->IsFree()) {
                const std::size_t dof_id = it_dof->EquationId();
                const TDataType residual_dof_value = rb[dof_id];
                const auto& r_curr_var = it_dof->GetVariable();
                if ((*p_check_disp)(r_curr_var)) {
                    disp_residual_solution_norm += std::pow(residual_dof_value, 2);
                    ++disp_dof_num;
                } else { // We will assume is rotation dof
                    KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                    rot_residual_solution_norm += std::pow(residual_dof_value, 2);
                    ++rot_dof_num;
                }
            }
        }
        mDispCurrentResidualNorm = disp_residual_solution_norm;
        mRotCurrentResidualNorm = rot_residual_solution_norm;
        TDataType residual_disp_ratio = 1.0;
        TDataType residual_rot_ratio = 1.0;
        // We initialize the reference norms on the first check of the step
        if (mOptions.IsNot(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
            mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
            residual_disp_ratio = 1.0;
            if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 1.0 : rot_residual_solution_norm;
                residual_rot_ratio = 1.0;
            }
            mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
        }
        // We calculate the ratio of the displacements
        residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
        // FIX: mRotInitialResidualNorm is only ever assigned when rotation
        // DoFs are considered; reading it otherwise was undefined behavior.
        // Without rotation DoFs the ratio stays at its 1.0 default (unused).
        if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
            residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm;
        }
        // We calculate the absolute norms.
        // FIX: guard the 0/0 case when no free DoF of the given kind exists;
        // an empty DoF set is trivially converged (norm is zero).
        const TDataType residual_disp_abs = (disp_dof_num > 0) ? mDispCurrentResidualNorm/static_cast<TDataType>(disp_dof_num) : 0.0;
        const TDataType residual_rot_abs = (rot_dof_num > 0) ? mRotCurrentResidualNorm/static_cast<TDataType>(rot_dof_num) : 0.0;
        // The process info of the model part
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        // We print the results // TODO: Replace for the new log
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (r_process_info.Has(TABLE_UTILITY)) {
                std::cout.precision(4);
                TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                auto& r_table = p_table->GetTable();
                if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance;
                } else {
                    r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance;
                }
            } else {
                std::cout.precision(4);
                if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT)) {
                    KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        // FIX: this line printed the rotation values under a "DISPLACEMENT" label
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                    }
                } else {
                    KRATOS_INFO("DisplacementResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        // FIX: this line printed the rotation values under a "DISPLACEMENT" label
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                    }
                }
            }
        }
        r_process_info[CONVERGENCE_RATIO] = residual_disp_ratio;
        r_process_info[RESIDUAL_NORM] = residual_disp_abs;
        // We check if converged (either the relative or the absolute tolerance suffices)
        const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
        const bool rot_converged = (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true;
        if (disp_converged && rot_converged) {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FGRN("       Achieved"));
                    else
                        r_table << "Achieved";
                } else {
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                }
            }
            return true;
        } else {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FRED("   Not achieved"));
                    else
                        r_table << "Not achieved";
                } else {
                    if (mOptions.IsNot(DisplacementResidualContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                }
            }
            return false;
        }
    } else // In this case all the displacements are imposed!
        return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart) override
{
    // Mark the criteria as initialized
    BaseType::mConvergenceCriteriaIsInitialized = true;
    // Check rotation dof: the flag drives whether rotation residuals are
    // tracked separately in PostCriteria
    mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));
    // Initialize the table header (only once, and only if a table utility
    // is registered in the process info)
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();
        // Displacement columns
        r_table.AddColumn("DP RATIO", 10);
        r_table.AddColumn("EXP. RAT", 10);
        r_table.AddColumn("ABS", 10);
        r_table.AddColumn("EXP. ABS", 10);
        // Rotation columns (only when rotation DoFs exist)
        if (mOptions.Is(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
            r_table.AddColumn("RT RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
        }
        r_table.AddColumn("CONVERGENCE", 15);
        mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // Reset the reference residual so the first PostCriteria call of this
    // step re-captures the initial norms
    mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
    // Defaults for this criteria; keys mirror the members set in AssignSettings
    Parameters default_parameters = Parameters(R"(
    {
        "name"                                 : "displacement_residual_contact_criteria",
        "ensure_contact"                       : false,
        "print_convergence_criterion"          : false,
        "residual_relative_tolerance"          : 1.0e-4,
        "residual_absolute_tolerance"          : 1.0e-9,
        "rotation_residual_relative_tolerance" : 1.0e-4,
        "rotation_residual_absolute_tolerance" : 1.0e-9
    })");
    // Getting base class default parameters and merging any keys missing above
    const Parameters base_default_parameters = BaseType::GetDefaultParameters();
    default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
    return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
    // Matches the "name" entry returned by GetDefaultParameters()
    return "displacement_residual_contact_criteria";
}
///@}
///@name Operations
///@{
///@}
///@name Acces
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
    // Let the base class consume its own settings first
    BaseType::AssignSettings(ThisParameters);
    // The displacement residual tolerances
    mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
    mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
    // The rotation residual tolerances
    mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble();
    mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble();
    // Set local flags: everything except the printing mode starts cleared;
    // ROTATION_DOF_IS_CONSIDERED is determined later in Initialize()
    mOptions.Set(DisplacementResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
    mOptions.Set(DisplacementResidualContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
    mOptions.Set(DisplacementResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual
TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual
TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual
TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual
TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementResidualContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the local flags declared in the criteria;
// each flag occupies a distinct bit position (1..4)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4));
}
#endif /* KRATOS_DISPLACEMENT_RESIDUAL_CONTACT_CRITERIA_H */
|
serial_tree_learner.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "col_sampler.hpp"
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "monotone_constraints.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
namespace LightGBM {
using json11::Json;
/*! \brief forward declaration */
class CostEfficientGradientBoosting;
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;
  explicit SerialTreeLearner(const Config* config);
  ~SerialTreeLearner();
  /*! \brief One-time setup against the training dataset */
  void Init(const Dataset* train_data, bool is_constant_hessian) override;
  /*! \brief Rebind to new training data, resetting the multi-value bin */
  void ResetTrainingData(const Dataset* train_data,
                         bool is_constant_hessian) override {
    ResetTrainingDataInner(train_data, is_constant_hessian, true);
  }
  void ResetIsConstantHessian(bool is_constant_hessian) override {
    share_state_->is_constant_hessian = is_constant_hessian;
  }
  virtual void ResetTrainingDataInner(const Dataset* train_data,
                                      bool is_constant_hessian,
                                      bool reset_multi_val_bin);
  void ResetConfig(const Config* config) override;
  /*! \brief Store the forced-split JSON, or clear it when null/empty */
  inline void SetForcedSplit(const Json* forced_split_json) override {
    if (forced_split_json != nullptr && !forced_split_json->is_null()) {
      forced_split_json_ = forced_split_json;
    } else {
      forced_split_json_ = nullptr;
    }
  }
  Tree* Train(const score_t* gradients, const score_t *hessians) override;
  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) override;
  /*! \brief Point the learner at a bagging subset; subset == nullptr means
   *  "use indices of the full dataset" */
  void SetBaggingData(const Dataset* subset, const data_size_t* used_indices, data_size_t num_data) override {
    if (subset == nullptr) {
      data_partition_->SetUsedDataIndices(used_indices, num_data);
      share_state_->SetUseSubrow(false);
    } else {
      // Reuse data-reset logic but keep the multi-value bin
      ResetTrainingDataInner(subset, share_state_->is_constant_hessian, false);
      share_state_->SetUseSubrow(true);
      share_state_->SetSubrowCopied(false);
      share_state_->bagging_use_indices = used_indices;
      share_state_->bagging_indices_cnt = num_data;
    }
  }
  /*! \brief Add each leaf's output to the score of the rows assigned to it */
  void AddPredictionToScore(const Tree* tree,
                            double* out_score) const override {
    // A single-leaf tree contributes nothing
    if (tree->num_leaves() <= 1) {
      return;
    }
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }
  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
  /*! \brief Get output of parent node, used for path smoothing */
  double GetParentOutput(const Tree* tree, const LeafSplits* leaf_splits) const;
 protected:
  void ComputeBestSplitForFeature(FeatureHistogram* histogram_array_,
                                  int feature_index, int real_fidx,
                                  int8_t is_feature_used, int num_data,
                                  const LeafSplits* leaf_splits,
                                  SplitInfo* best_split, double parent_output);
  void GetShareStates(const Dataset* dataset, bool is_constant_hessian, bool is_first_time);
  void RecomputeBestSplitForLeaf(int leaf, SplitInfo* split);
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();
  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
  virtual void FindBestSplits(const Tree* tree);
  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract, const Tree*);
  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  inline virtual void Split(Tree* tree, int best_leaf, int* left_leaf,
                            int* right_leaf) {
    SplitInner(tree, best_leaf, left_leaf, right_leaf, true);
  }
  void SplitInner(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf,
                  bool update_cnt);
  /* Force splits with forced_split_json dict and then return num splits forced.*/
  int32_t ForceSplits(Tree* tree, int* left_leaf, int* right_leaf,
                      int* cur_depth);
  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores minimum and maximum constraints for each leaf */
  std::unique_ptr<LeafConstraintsBase> constraints_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#elif USE_CUDA
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, CHAllocator<score_t>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief column (feature) sampler */
  ColSampler col_sampler_;
  /*! \brief user-provided forced splits, may be null */
  const Json* forced_split_json_;
  std::unique_ptr<TrainingShareStates> share_state_;
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};
// Number of data rows currently assigned to leaf `leaf_idx`; a negative
// index denotes "no such leaf" and yields zero.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  return (leaf_idx >= 0) ? data_partition_->leaf_count(leaf_idx) : 0;
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
mm.c | #include <stdio.h>
#include <stdlib.h>
/*
TEMPO SEQUENCIAL PERF:
56,306335417 seconds time elapsed
TEMPO PARALELO PERF:
12,394167292 seconds time elapsed
SPEED UP = 4,5
*/
/* Naive dense matrix product c = a * b.  All three matrices are
 * width x width, stored row-major in flat arrays.  Rows of the result are
 * distributed across OpenMP threads. */
void mm(double* a, double* b, double* c, int width)
{
#pragma omp parallel for
    for (int row = 0; row < width; row++) {
        const double *a_row = &a[row * width];
        double *c_row = &c[row * width];
        for (int col = 0; col < width; col++) {
            double acc = 0;
            for (int k = 0; k < width; k++)
                acc += a_row[k] * b[k * width + col];
            c_row[col] = acc;
        }
    }
}
/* Benchmark driver: allocates three width x width matrices, fills
 * a[i][j] = i and b[i][j] = j in parallel, then runs the multiplication.
 * FIX: malloc results are now checked and the buffers are freed
 * (they were previously leaked); dead commented-out print loop removed. */
int main()
{
    int width = 2000;
    double *a = (double*) malloc (width * width * sizeof(double));
    double *b = (double*) malloc (width * width * sizeof(double));
    double *c = (double*) malloc (width * width * sizeof(double));
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "allocation failure\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }
    #pragma omp parallel for
    for(int i = 0; i < width; i++) {
        for(int j = 0; j < width; j++) {
            a[i*width+j] = i;
            b[i*width+j] = j;
            c[i*width+j] = 0;
        }
    }
    mm(a,b,c,width);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
GB_unop__identity_uint64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_uint16
// op(A') function: GB_unop_tran__identity_uint64_uint16
// C type: uint64_t
// A type: uint16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint64_t) Ax [p] for every entry of A.  When A is bitmap,
// Ab selects which positions hold entries.  NOTE: this file is
// auto-generated; code left untouched, comments only.
GrB_Info GB_unop_apply__identity_uint64_uint16
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint64_t) A': transpose and typecast; the actual loop body is
// provided by the shared template GB_unop_transpose.c, which uses the
// GB_* macros defined at the top of this file.
GrB_Info GB_unop_tran__identity_uint64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
linAlgAXPBY.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// b[i + yOffset] = alpha * a[i + xOffset] + beta * b[i + yOffset], i in [0, N)
extern "C"
void FUNC(axpby)(const dlong & N, const dlong & xOffset, const dlong& yOffset, const dfloat & alpha, const dfloat * __restrict__ cpu_a,
                 const dfloat &beta, dfloat * __restrict__ cpu_b){
#ifdef __NEKRS__OMP__
  #pragma omp parallel for
#endif
  for(dlong n = 0; n < N; ++n)
    cpu_b[n + yOffset] = alpha*cpu_a[n + xOffset] + beta*cpu_b[n + yOffset];
}
// Field-wise axpby: for each of the Nfields contiguous fields of length N
// (stride `offset`), b = alpha*a + beta*b.
extern "C"
void FUNC(axpbyMany)(const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat & alpha, const dfloat * __restrict__ cpu_a,
                     const dfloat & beta, dfloat * __restrict__ cpu_b){
#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2)
#endif
  for(int field = 0; field < Nfields; field++) {
    for(dlong n = 0; n < N; ++n){
      const dlong idx = n + field*offset;
      cpu_b[idx] = alpha*cpu_a[idx] + beta*cpu_b[idx];
    }
  }
}
|
target_update-2.c | /* { dg-do run } */
#include <stdlib.h>
const int MAX = 1800;
/* Abort the program if arrays a and b differ anywhere in their first N
   elements; used to compare the reference and offloaded results. */
void check (int *a, int *b, int N)
{
  int idx = 0;
  while (idx < N)
    {
      if (a[idx] != b[idx])
        abort ();
      idx++;
    }
}
/* Fill a1 with an alternating -1/+1 pattern (starting at -1) and a2 with
   the ramp 0..N-1. */
void init (int *a1, int *a2, int N)
{
  for (int i = 0; i < N; i++)
    {
      a1[i] = (i % 2 == 0) ? -1 : 1;
      a2[i] = i;
    }
}
/* Reinitialize a to the ramp 0..N-1 and report that it changed (always 1);
   callers feed the return value to `#pragma omp target update if (...)`. */
int maybe_init_again (int *a, int N)
{
  int idx;
  for (idx = 0; idx < N; idx++)
    a[idx] = idx;
  return 1;
}
/* Host-side reference: p = v1*v2 after a fresh init, then both vectors are
   reinitialized to the ramp 0..N-1 and p += v1*v2 is accumulated.  Mirrors
   the offloaded vec_mult so the two results can be compared. */
void vec_mult_ref (int *p, int *v1, int *v2, int N)
{
  int i;
  init (v1, v2, N);
  for (i = 0; i < N; i++)
    p[i] = v1[i] * v2[i];
  /* always returns 1, so both vectors are reinitialized */
  maybe_init_again (v1, N);
  maybe_init_again (v2, N);
  for (i = 0; i < N; i++)
    p[i] = p[i] + (v1[i] * v2[i]);
}
/* Offloaded version of vec_mult_ref.  Exercises `#pragma omp target update
   if (changed)`: after the host reinitializes v1/v2, the device copies are
   refreshed only when the init function reports a change. */
void vec_mult (int *p, int *v1, int *v2, int N)
{
  int i;
  init (v1, v2, N);
#pragma omp target data map(to: v1[:N], v2[:N]) map(from: p[0:N])
  {
    int changed;
#pragma omp target
#pragma omp parallel for
      for (i = 0; i < N; i++)
	p[i] = v1[i] * v2[i];
    /* host-side update; device copy of v1 is refreshed conditionally */
    changed = maybe_init_again (v1, N);
#pragma omp target update if (changed) to(v1[:N])
    changed = maybe_init_again (v2, N);
#pragma omp target update if (changed) to(v2[:N])
#pragma omp target
#pragma omp parallel for
      for (i = 0; i < N; i++)
	p[i] = p[i] + (v1[i] * v2[i]);
  }
}
/* Run the reference and offloaded computations on MAX-element vectors and
   abort if they disagree.  FIX: malloc results are now checked before use
   (previously dereferenced unchecked). */
int main ()
{
  int *p = (int *) malloc (MAX * sizeof (int));
  int *p1 = (int *) malloc (MAX * sizeof (int));
  int *v1 = (int *) malloc (MAX * sizeof (int));
  int *v2 = (int *) malloc (MAX * sizeof (int));
  if (p == NULL || p1 == NULL || v1 == NULL || v2 == NULL)
    abort ();
  vec_mult_ref (p, v1, v2, MAX);
  vec_mult (p1, v1, v2, MAX);
  check (p, p1, MAX);
  free (p);
  free (p1);
  free (v1);
  free (v2);
  return 0;
}
|
ten_tusscher_2004_RS_CPU_epi_S1.c | // Ten Tusscher version for the Scenario 1 (AP + max:dvdt)
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S1.h"
// Report the model's constants to the caller: the resting potential and/or
// the number of ODE state variables, as requested by the boolean flags.
// (Signature and parameter names come from the GET_CELL_MODEL_DATA macro.)
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Load the per-cell state vector sv with a precomputed steady-state
// (paced-to-equilibrium) initial condition instead of the model's default
// resting values.  (Signature comes from the SET_ODE_INITIAL_CONDITIONS_CPU
// macro; sv is assumed to hold NEQ entries — TODO confirm with caller.)
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial condition (kept for reference)
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f; //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions
    //real sv_sst[]={-86.7599490237245,0.00123831208622928,0.784376608695859,0.784218467628080,0.000170016808347696,0.487085364989106,0.00290043259117021,0.999998410220405,1.87270147822737e-08,1.84334654710491e-05,0.999776444937499,1.00727320017378,0.999997421410314,4.09813553215966e-05,1.00091265418338,9.36478320062292,139.974256946572};
    real sv_sst[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};
    // Copy the steady-state vector into the cell state
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advance every requested cell by num_steps explicit time steps of size dt.
// When cells_to_solve is non-NULL it maps the loop index to the cell's slot
// in sv; otherwise cells are solved in index order.  Cells are independent,
// so the outer loop is parallelized; sv_id must be private per thread.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    // NOTE(review): i is int while num_cells_to_solve is presumably an
    // unsigned type — confirm the signed/unsigned comparison is harmless
    // for very large meshes.
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        // The same stimulus current is applied for all sub-steps of cell i.
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// Perform one time step for a single cell: snapshot the current state,
// evaluate the model right-hand side, and write the updated state back.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ];
    real next[NEQ];
    for(int k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }
    RHS_cpu(state, next, stim_current, dt);
    for(int k = 0; k < NEQ; k++) {
        sv[k] = next[k];
    }
}
// Evaluate one explicit step of the ten Tusscher 2004 epicardial model
// (Scenario-1 parameterization).  Reads the 17-entry state vector sv,
// applies the external stimulus (stim_current), and writes the state at
// t+dt into rDY_ (gates are advanced with the Rush-Larsen scheme, voltage
// and concentrations with forward Euler).
// FIX(review): the Cai buffering quadratic previously used single-precision
// sqrtf() while the analogous CaSR update uses sqrt(); with real == double
// this truncated the Cai update to float precision.  Changed to sqrt().
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    //real Vmaxup=0.000425f;
    real Vmaxup=0.000714016847624717;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    // real Gkr=0.096;
    real Gkr=0.129819327185159;
    //Parameters for Iks
    real pKNa=0.03;
#ifdef EPI
    // real Gks=0.245;
    real Gks=0.227808856917217;
#endif
#ifdef ENDO
    real Gks=0.245;
#endif
#ifdef MCELL
    real Gks=0.062;
#endif
    //Parameters for Ik1
    // real GK1=5.405;
    real GK1=3.92366049957936;
    //Parameters for Ito
#ifdef EPI
    // real Gto=0.294;
    real Gto=0.290683783819880;
#endif
#ifdef ENDO
    real Gto=0.073;
#endif
#ifdef MCELL
    real Gto=0.294;
#endif
    //Parameters for INa
    //real GNa=14.838;
    real GNa=13.4587995801200;
    //Parameters for IbNa
    // real GbNa=0.00029;
    real GbNa=0.000132990931598298;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    // real knak=1.362;
    real knak=2.84430638940750;
    //Parameters for ICaL
    //real GCaL=0.000175;
    real GCaL=0.000158212114858015;
    //Parameters for IbCa
    // real GbCa=0.000592;
    real GbCa=0.000706297098320405;
    //Parameters for INaCa
    //real knaca=1000;
    real knaca=1096.43133943582;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    // real GpCa=0.825;
    real GpCa=0.390810222439592;
    real KpCa=0.0005;
    //Parameters for IpK;
    // real GpK=0.0146;
    real GpK=0.0199551557341385;
    // Setting Elnaz's parameters (overrides the defaults initialized above)
    real parameters [] = {13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    // Membrane currents and intermediate quantities
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    // Precomputed factors reused below
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents (Nernst potentials and rectification factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;
    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    // Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    // was sqrtf(): full double precision to match the CaSR update above
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates (Rush-Larsen: exact exponential relaxation toward *_INF)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only decrease while the membrane is depolarized
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
GB_builder.c | //------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLED BY: GB_build, GB_wait, GB_transpose, GB_concat_hyper
// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.
// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both. The values are provided as S_input or S_work, not both. On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.
// The work is done in major 5 Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates. If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices). Let e be the of tuples on
// input. Let p be the # of threads used.
// STEP 1: copy user input. O(e/p) read/write per thread, or skipped.
// STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if
// the tuples are already sorted.
// STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no
// duplicates, or skipped if already done. O(e/p) read/writes
// per thread if duplicates appear.
// STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if
// T is a vector.
// STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the
// values can be transplanted into T as-is.
// For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already
// sorted with no duplicates, and no typecasting needs to be done, then Step 1
// still must be done (each thread does O(e/p) reads of (I_input,J_input) and
// writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3
// are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then
// I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread
// to copy Sx into T->x.
// For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes
// per thread. The input is always a vector, so vdim == 1 always holds. Step
// 2 is skipped if the indices are already sorted, and Step 3 does no work at
// all unless duplicates appear. Step 4 takes no time, for any vector. Step 5
// does O(e/p) reads/writes per thread.
// For GB_wait: the pending tuples are provided as I_work, J_work, and S_work,
// so Step 1 is skipped (no need to check for invalid indices). The input
// J_work may be null (vdim can be anything, since GB_wait is used for both
// vectors and matrices). The tuples might be in sorted order already, which
// is known precisely known from A->Pending->sorted. Step 2 does
// O((e log e)/p) work to sort the tuples. Duplicates may appear, and
// out-of-order tuples are likely. Step 3 does O(e/p) read/writes. Step 4
// does O(e/p) reads per thread of (I_work,J_work), or just I_work. Step 5
// does O(e/p) read/writes per thread, or O(1) time if S_work can be
// transplanted into T->x.
// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied
// to the values) or S_work (if an op was applied to the A->x values). This is
// only done for matrices, not vectors, so vdim > 1 will always hold. The
// indices are valid so Step 1 is skipped. The tuples are not sorted, so Step
// 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so
// Step 3 only does O(e/p) reads of J_work to count the vectors in each slice.
// Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5
// does O(e/p) read/writes per thread, but it uses the simpler case in
// GB_reduce_build_template since no duplicates can appear. It is unlikely
// able to transplant S_work into T->x since the input will almost always be
// unsorted.
// For GB_concat_hyper: uses I_work, J_work, and S_work. No duplicates
// appear. Tuples are not sorted on input. I_work is transplanted into C->i.
// J_work and S_work are freed on output. S_work is not transplanted into
// C->x.
// For iso inputs/outputs: T and Sx have the same iso property. If
// they are iso, then dup is always NULL. Duplicates may or may not appear
// if T and Sx are iso.
// (1) GrB_Matrix_build, GrB_Vector_build, and GB_wait do not pass in an iso
// Sx array, where Sx is S_input for GrB*build, and S_work for GB_wait.
// Sx and Tx are not iso. Duplicates may appear. dup is always present
// for GrB*build, but may be either NULL or non-NULL for GB_wait.
// (2) GxB_Matrix_build_Scalar and GxB_Vector_build_Scalar: always construct
// iso matrices. For those methods Sx and Tx are always iso, and no dup
// operator is be passed in (dup is NULL here, which is the implied 2nd
// operator). Duplicates may appear.
// (3) GB_transpose and GB_concat_hyper can pass in Sx as iso or
// non-iso, and always passes in dup as NULL since there are no
// duplicates. Sx and Tx are either both iso, or both non-iso.
// This method always returns T as hypersparse, and T is iso if and only
// if Sx is iso.
#include "GB_build.h"
#include "GB_sort.h"
#include "GB_binop.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
// Accessors for the (i,j,k) tuple arrays; t < 0 is a sentinel meaning
// "no tuple" and maps to -1.  J_work == NULL implies a single vector
// (j == 0); K_work == NULL implies the identity permutation (k == t).
#define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t])
#define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t]))
#define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? t : K_work [t]))
// Free all workspace allocated by GB_builder (werk stack plus the
// I/J/S work arrays and K_work); safe to call on any error path.
#define GB_FREE_WORKSPACE                       \
{                                               \
    GB_WERK_POP (Work, int64_t) ;               \
    GB_FREE (I_work_handle, *I_work_size_handle) ;      \
    GB_FREE (J_work_handle, *J_work_size_handle) ;      \
    GB_FREE (S_work_handle, *S_work_size_handle) ;      \
    GB_FREE_WORK (&K_work, K_work_size) ;       \
}
//------------------------------------------------------------------------------
// GB_builder
//------------------------------------------------------------------------------
GrB_Info GB_builder // build a matrix from tuples
(
GrB_Matrix T, // matrix to build, static or dynamic header
const GrB_Type ttype, // type of output matrix T
const int64_t vlen, // length of each vector of T
const int64_t vdim, // number of vectors in T
const bool is_csc, // true if T is CSC, false if CSR
int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples
size_t *I_work_size_handle,
int64_t **J_work_handle, // for (j,i,k) tuples
size_t *J_work_size_handle,
GB_void **S_work_handle, // array of values of tuples, size ijslen,
// or size 1 if S is iso
size_t *S_work_size_handle,
bool known_sorted, // true if tuples known to be sorted
bool known_no_duplicates, // true if tuples known to not have dupl
int64_t ijslen, // size of I_work and J_work arrays
const bool is_matrix, // true if T a GrB_Matrix, false if vector
const int64_t *restrict I_input,// original indices, size nvals
const int64_t *restrict J_input,// original indices, size nvals
const GB_void *restrict S_input,// array of values of tuples, size nvals,
// or size 1 if S_input or S_work are iso
const bool S_iso, // true if S_input or S_work are iso
const int64_t nvals, // number of tuples, and size of K_work
const GrB_BinaryOp dup, // binary function to assemble duplicates,
// if NULL use the SECOND operator to
// keep the most recent duplicate.
const GrB_Type stype, // the type of S_work or S_input
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (T != NULL) ; // T is a static or dynamic header on input
ASSERT (nvals >= 0) ;
ASSERT_TYPE_OK (ttype, "ttype for builder", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (dup, "dup for builder", GB0) ;
ASSERT (I_work_handle != NULL) ;
ASSERT (J_work_handle != NULL) ;
ASSERT (S_work_handle != NULL) ;
ASSERT (!GB_OP_IS_POSITIONAL (dup)) ;
ASSERT (I_work_size_handle != NULL) ;
ASSERT (J_work_size_handle != NULL) ;
ASSERT (S_work_size_handle != NULL) ;
//--------------------------------------------------------------------------
// get Sx
//--------------------------------------------------------------------------
GB_void *restrict S_work = (*S_work_handle) ;
const GB_void *restrict Sx = (S_work == NULL) ? S_input : S_work ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
ASSERT (GB_IMPLIES (S_iso, ttype == stype)) ;
ASSERT (GB_IMPLIES (S_iso, dup == NULL)) ;
//==========================================================================
// symbolic phase of the build =============================================
//==========================================================================
// The symbolic phase sorts the tuples and finds any duplicates. The
// output matrix T is constructed (not including T->i and T->x), and T->h
// and T->p are computed. Then I_work is transplanted into T->i, or T->i is
// allocated. T->x is then allocated. It is not computed until the
// numeric phase.
// When this function returns, I_work is either freed or transplanted into
// T->i. J_work is freed, and the I_work and J_work pointers (in the
// caller) are set to NULL by setting their handles to NULL. Note that
// J_work may already be NULL on input, if T has one or zero vectors
// (J_work_handle is always non-NULL however).
GrB_Info info ;
int64_t *restrict I_work = (*I_work_handle) ;
int64_t *restrict J_work = (*J_work_handle) ;
int64_t *restrict K_work = NULL ; size_t K_work_size = 0 ;
ASSERT (*J_work_size_handle == GB_Global_memtable_size (J_work)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
GB_WERK_PUSH (Work, 5*(nthreads+1), int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
memset (Work, 0, Work_nitems * sizeof (int64_t)) ;
int64_t *restrict tstart_slice = Work ; // nthreads+1
int64_t *restrict tnvec_slice = Work + (nthreads+1) ; // nthreads+1
int64_t *restrict tnz_slice = Work + 2*(nthreads+1) ; // nthreads+1
int64_t *restrict kbad = Work + 3*(nthreads+1) ; // nthreads
int64_t *restrict ilast_slice = Work + 4*(nthreads+1) ; // nthreads
//--------------------------------------------------------------------------
// partition the tuples for the threads
//--------------------------------------------------------------------------
// Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1.
// Each thread handles about the same number of tuples. This partition
// depends only on nvals.
GB_eslice (tstart_slice, nvals, nthreads) ;
// tstart_slice [tid]: first tuple in slice tid
// tnvec_slice [tid]: # of vectors that start in a slice. If a vector
// starts in one slice and ends in another, it is
// counted as being in the first slice.
// tnz_slice [tid]: # of entries in a slice after removing duplicates
// sentinel values for the final cumulative sum
tnvec_slice [nthreads] = 0 ;
tnz_slice [nthreads] = 0 ;
// this becomes true if the first pass computes tnvec_slice and tnz_slice,
// and if the (I_input,J_input) tuples were found to be already sorted with
// no duplicates present.
bool tnvec_and_tnz_slice_computed = false ;
//--------------------------------------------------------------------------
// STEP 1: copy user input and check if valid
//--------------------------------------------------------------------------
// If the indices are provided by (I_input,J_input), then import them into
// (I_work,J_work) and check if they are valid, and sorted. If the input
// happens to be already sorted, then duplicates are detected and the # of
// vectors in each slice is counted.
if (I_work == NULL)
{
//----------------------------------------------------------------------
// allocate I_work
//----------------------------------------------------------------------
// allocate workspace to load and sort the index tuples:
// vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k]
// vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and
// j = J_input [k]. If the tuples are found to be already sorted on
// input, then J_work is not allocated, and J_input is used instead.
// The k value in the tuple gives the position in the original set of
// tuples: I_input [k] and Sx [k] when vdim <= 1, and also J_input [k]
// for matrices with vdim > 1.
// The workspace I_work and J_work are allocated here but freed (or
// transplanted) inside GB_builder. K_work is allocated, used, and
// freed in GB_builder.
ASSERT (J_work == NULL) ;
I_work = GB_MALLOC (nvals, int64_t, I_work_size_handle) ;
(*I_work_handle) = I_work ;
ijslen = nvals ;
if (I_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// create the tuples to sort, and check for any invalid indices
//----------------------------------------------------------------------
known_sorted = true ;
bool no_duplicates_found = true ;
if (nvals == 0)
{
// nothing to do
}
else if (is_matrix)
{
//------------------------------------------------------------------
// C is a matrix; check both I_input and J_input
//------------------------------------------------------------------
ASSERT (J_input != NULL) ;
ASSERT (I_work != NULL) ;
ASSERT (vdim >= 0) ;
ASSERT (I_input != NULL) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t my_tnvec = 0 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i,j)
int64_t i = I_input [k] ;
int64_t j = J_input [k] ;
if (i < 0 || i >= vlen || j < 0 || j >= vdim)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted &&
((jlast < j) || (jlast == j && ilast <= i)) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(jlast == j && ilast == i)) ;
// copy the tuple into I_work. J_work is done later.
I_work [k] = i ;
if (j > jlast)
{
// vector j starts in this slice (but this is
// valid only if J_input is sorted on input)
my_tnvec++ ;
}
// log the last index seen
ilast = i ; jlast = j ;
}
// these are valid only if I_input and J_input are sorted on
// input, with no duplicates present.
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = kend - kstart ;
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
int64_t j = J_input [kbad [tid]] ;
int64_t row = is_csc ? i : j ;
int64_t col = is_csc ? j : i ;
int64_t nrows = is_csc ? vlen : vdim ;
int64_t ncols = is_csc ? vdim : vlen ;
GB_FREE_WORKSPACE ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd "," GBd ") out of bounds,"
" must be < (" GBd ", " GBd ")",
row, col, nrows, ncols) ;
}
}
// if the tuples were found to be already in sorted order, and if
// no duplicates were found, then tnvec_slice and tnz_slice are now
// valid, Otherwise, they can only be computed after sorting.
tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ;
//------------------------------------------------------------------
// allocate J_work, if needed
//------------------------------------------------------------------
if (vdim > 1 && !known_sorted)
{
// copy J_input into J_work, so the tuples can be sorted
J_work = GB_MALLOC (nvals, int64_t, J_work_size_handle) ;
(*J_work_handle) = J_work ;
if (J_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads);
}
else
{
// J_work is a shallow copy of J_input. The pointer is not
// copied into (*J_work_handle), so it will not be freed.
// J_input is not modified, even though it is typecast to the
// int64_t *J_work, since J_work is not modified in this case.
J_work = (int64_t *) J_input ;
}
}
else
{
//------------------------------------------------------------------
// C is a typecasted GrB_Vector; check only I_input
//------------------------------------------------------------------
ASSERT (I_input != NULL) ;
ASSERT (J_input == NULL) ;
ASSERT (vdim == 1) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i)
int64_t i = I_input [k] ;
if (i < 0 || i >= vlen)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted && (ilast <= i) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(ilast == i)) ;
// copy the tuple into the work arrays to be sorted
I_work [k] = i ;
// log the last index seen
ilast = i ;
}
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
GB_FREE_WORKSPACE ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd ") out of bounds, must be < (" GBd ")",
i, vlen) ;
}
}
}
//----------------------------------------------------------------------
// determine if duplicates are possible
//----------------------------------------------------------------------
// The input is now known to be sorted, or not. If it is sorted, and
// if no duplicates were found, then it is known to have no duplicates.
// Otherwise, duplicates might appear, but a sort is required first to
// check for duplicates.
known_no_duplicates = known_sorted && no_duplicates_found ;
}
//--------------------------------------------------------------------------
// STEP 2: sort the tuples in ascending order
//--------------------------------------------------------------------------
// If the tuples are known to already be sorted, Step 2 is skipped. In
// that case, K_work is NULL (not allocated), which implicitly means that
// K_work [k] = k for all k = 0:nvals-1. K_work is always NULL if Sx and
// Tx are iso.
if (!known_sorted)
{
//----------------------------------------------------------------------
// allocate K_work workspace (not needed if T and Sx are iso)
//----------------------------------------------------------------------
if (!S_iso)
{
// create the k part of each tuple
K_work = GB_MALLOC_WORK (nvals, int64_t, &K_work_size) ;
if (K_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
// The k part of each tuple (i,k) or (j,i,k) records the original
// position of the tuple in the input list. This allows an
// unstable sorting algorithm to be used. Since k is unique, it
// forces the result of the sort to be stable regardless of whether
// or not the sorting algorithm is stable. It also keeps track of
// where the numerical value of the tuple can be found; it is in
// Sx[k] for the tuple (i,k) or (j,i,k), regardless of where the
// tuple appears in the list after it is sorted.
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvals ; k++)
{
K_work [k] = k ;
}
}
//----------------------------------------------------------------------
// sort all the tuples
//----------------------------------------------------------------------
if (vdim > 1)
{
//------------------------------------------------------------------
// sort a set of (j,i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (j,i)
info = GB_msort_2 (J_work, I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_3 (J_work, I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
int64_t j = J_work [k] ;
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
ilast = i ;
jlast = j ;
}
}
#endif
}
else
{
//------------------------------------------------------------------
// sort a set of (i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (i)
info = GB_msort_1 (I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_2 (I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
ASSERT (ilast <= i) ;
ilast = i ;
}
}
#endif
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_msort_*
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// STEP 3: count vectors and duplicates in each slice
//--------------------------------------------------------------------------
// Duplicates are located, counted and their indices negated. The # of
// vectors in each slice is counted. If the indices are known to not have
// duplicates, then only the vectors are counted. Counting the # of
// vectors is skipped if already done by Step 1.
if (known_no_duplicates)
{
//----------------------------------------------------------------------
// no duplicates: just count # vectors in each slice
//----------------------------------------------------------------------
// This is much faster, particularly if the # of vectors in each slice
// has already been computed.
#ifdef GB_DEBUG
{
// assert that there are no duplicates
int64_t ilast = -1, jlast = -1 ;
for (int64_t t = 0 ; t < nvals ; t++)
{
int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ;
bool is_duplicate = (i == ilast && j == jlast) ;
ASSERT (!is_duplicate) ;
ilast = i ; jlast = j ;
}
}
#endif
if (vdim <= 1)
{
// all tuples appear in at most one vector, and there are no
// duplicates, so there is no need to scan I_work or J_work.
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
tnvec_slice [tid] = 0 ;
tnz_slice [tid] = tend - tstart ;
}
tnvec_slice [0] = (nvals == 0) ? 0 : 1 ;
}
else
{
// count the # of unique vector indices in J_work. No need to scan
// I_work since there are no duplicates to be found. Also no need
// to compute them if already found in Step 1.
if (!tnvec_and_tnz_slice_computed)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = J_work [t] ;
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = tend - tstart ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// look for duplicates and count # vectors in each slice
//----------------------------------------------------------------------
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
ilast_slice [tid] = GB_I_WORK (tstart-1) ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t my_ndupl = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t ilast = ilast_slice [tid] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
// tuples are now sorted but there may be duplicates
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
// check if (j,i,k) is a duplicate
if (i == ilast && j == jlast)
{
// flag the tuple as a duplicate
I_work [t] = -1 ;
my_ndupl++ ;
// the sort places earlier duplicate tuples (with smaller
// k) after later ones (with larger k).
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ;
}
else
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
ilast = i ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = (tend - tstart) - my_ndupl ;
}
}
//--------------------------------------------------------------------------
// find total # of vectors and duplicates in all tuples
//--------------------------------------------------------------------------
// Replace tnvec_slice with its cumulative sum, after which each slice tid
// will be responsible for the # vectors in T that range from tnvec_slice
// [tid] to tnvec_slice [tid+1]-1.
GB_cumsum (tnvec_slice, nthreads, NULL, 1, NULL) ;
int64_t tnvec = tnvec_slice [nthreads] ;
// Replace tnz_slice with its cumulative sum
GB_cumsum (tnz_slice, nthreads, NULL, 1, NULL) ;
// find the total # of final entries, after assembling duplicates
int64_t tnz = tnz_slice [nthreads] ;
int64_t ndupl = nvals - tnz ;
//--------------------------------------------------------------------------
// allocate T; always hypersparse
//--------------------------------------------------------------------------
// allocate T; allocate T->p and T->h but do not initialize them.
// T is always hypersparse. The header T always exists on input, as
// either a static or dynamic header.
bool static_header = T->static_header ;
info = GB_new (&T, static_header, // always hyper, static or dynamic header
ttype, vlen, vdim, GB_Ap_malloc, is_csc,
GxB_HYPERSPARSE, GB_ALWAYS_HYPER, tnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORKSPACE ;
return (info) ;
}
ASSERT (T->p != NULL) ;
ASSERT (T->h != NULL) ;
ASSERT (T->b == NULL) ;
ASSERT (T->i == NULL) ;
ASSERT (T->x == NULL) ;
T->iso = S_iso ; // OK: T is iso if and only if Sx is iso
bool do_burble = (vlen > 1 || vdim > 1) && (nvals > 1) ;
if (do_burble)
{
if (S_iso)
{
GBURBLE ("(iso build) ") ;
}
else
{
GBURBLE ("(build) ") ;
}
}
//--------------------------------------------------------------------------
// STEP 4: construct the vector pointers and hyperlist for T
//--------------------------------------------------------------------------
// Step 4 scans the J_work indices and constructs T->h and T->p.
int64_t *restrict Th = T->h ;
int64_t *restrict Tp = T->p ;
if (vdim <= 1)
{
//----------------------------------------------------------------------
// special case for vectors
//----------------------------------------------------------------------
ASSERT (tnvec == 0 || tnvec == 1) ;
if (tnvec > 0)
{
Th [0] = 0 ;
Tp [0] = 0 ;
}
}
else if (ndupl == 0)
{
//----------------------------------------------------------------------
// no duplicates appear
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = GB_J_WORK (t) ;
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = t ;
my_tnvec++ ;
jlast = j ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// it is known that at least one duplicate appears
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnz = tnz_slice [tid] ;
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
if (i >= 0)
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = my_tnz ;
my_tnvec++ ;
jlast = j ;
}
my_tnz++ ;
}
}
}
}
// log the end of the last vector
T->nvec_nonempty = tnvec ;
T->nvec = tnvec ;
Tp [tnvec] = tnz ;
ASSERT (T->nvec == T->plen) ;
T->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// free J_work if it exists
//--------------------------------------------------------------------------
ASSERT (J_work_handle != NULL) ;
GB_FREE (J_work_handle, *J_work_size_handle) ;
J_work = NULL ;
//--------------------------------------------------------------------------
// allocate T->i
//--------------------------------------------------------------------------
if (ndupl == 0)
{
// shrink I_work from size ijslen to size tnz
if (tnz < ijslen)
{
// this cannot fail since the size is shrinking.
bool ok ;
GB_REALLOC (I_work, tnz, int64_t, I_work_size_handle, &ok, Context);
ASSERT (ok) ;
}
// transplant I_work into T->i
T->i = I_work ; T->i_size = (*I_work_size_handle) ;
I_work = NULL ;
(*I_work_handle) = NULL ;
(*I_work_size_handle) = 0 ;
}
else
{
// duplicates exist, so allocate a new T->i. I_work must be freed later
T->i = GB_MALLOC (tnz, int64_t, &(T->i_size)) ;
if (T->i == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
int64_t *restrict Ti = T->i ;
//==========================================================================
// numerical phase of the build: assemble any duplicates
//==========================================================================
// The tuples have been sorted. Assemble any duplicates with a switch
// factory of built-in workers, or four generic workers. The vector
// pointers T->p and hyperlist T->h (if hypersparse) have already been
// computed.
// If there are no duplicates, T->i holds the row indices of the tuple.
// Otherwise, the row indices are still in I_work. K_work holds the
// positions of each tuple in the array Sx. The tuples are sorted so that
// duplicates are adjacent to each other and they appear in the order they
// appeared in the original tuples. This method assembles the duplicates
// and computes T->i and T->x from I_work, K_work, and Sx. If no
// duplicates appear, T->i is already computed, and Sx just needs to be
// copied and permuted into T->x.
// The (i,k,Sx[k]) tuples are held in two integer arrays: (1) I_work or
// T->i, and (2) K_work, and an array Sx of numerical values. Sx has not
// been sorted, nor even accessed yet. It is identical to the original
// unsorted tuples. The (i,k,Sx[k]) tuple holds the row index i, the
// position k, and the value Sx [k]. This entry becomes T(i,j) = Sx [k] in
// the matrix T, and duplicates (if any) are assembled via the dup
// operator.
//--------------------------------------------------------------------------
// get opcodes and check types
//--------------------------------------------------------------------------
// With GB_build, there can be 1 to 2 different types.
// T->type is identical to the types of x,y,z for z=dup(x,y).
// dup is never NULL and all its three types are the same
// The type of Sx (stype) can be different but must be compatible
// with T->type
// With GB_wait, there can be 1 to 5 different types:
// The pending tuples are in Sx, of type stype which must be
// compatible with dup->ytype and T->type
// z = dup (x,y): can be NULL or have 1 to 3 different types
// T->type: must be compatible with all above types.
// dup may be NULL, in which case it is assumed be the implicit SECOND
// operator, with all three types equal to T->type
GrB_Type xtype, ytype, ztype ;
GxB_binary_function fdup ;
#ifndef GBCOMPACT
GB_Opcode opcode ;
#endif
GB_Type_code tcode = ttype->code ;
const size_t tsize = ttype->size ;
bool op_2nd ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
if (dup == NULL)
{
//----------------------------------------------------------------------
// dup is the implicit SECOND operator
//----------------------------------------------------------------------
// z = SECOND (x,y) where all three types are the same as ttype
// T(i,j) = (ttype) Sx(k) will be done for all tuples.
#ifndef GBCOMPACT
opcode = GB_SECOND_binop_code ;
#endif
xtype = ttype ;
ytype = ttype ;
ztype = ttype ;
fdup = NULL ;
op_2nd = true ;
ASSERT (GB_op_is_second (dup, ttype)) ;
}
else
{
//----------------------------------------------------------------------
// dup is an explicit operator
//----------------------------------------------------------------------
// T(i,j) = (ttype) Sx[k] will be done for the first tuple.
// for subsequent tuples: T(i,j) += Sx[k], via the dup operator and
// typecasting:
//
// y = (dup->ytype) Sx[k]
// x = (dup->xtype) T(i,j)
// z = (dup->ztype) dup (x,y)
// T(i,j) = (ttype) z
ASSERT_BINARYOP_OK (dup, "dup for build_factory", GB0) ;
ASSERT (!S_iso) ;
#ifndef GBCOMPACT
opcode = dup->opcode ;
#endif
xtype = dup->xtype ;
ytype = dup->ytype ;
ztype = dup->ztype ;
fdup = dup->binop_function ;
op_2nd = GB_op_is_second (dup, ttype) ;
}
//--------------------------------------------------------------------------
// get the sizes and codes of each type
//--------------------------------------------------------------------------
GB_Type_code zcode = ztype->code ;
GB_Type_code xcode = xtype->code ;
GB_Type_code ycode = ytype->code ;
ASSERT (GB_Type_compatible (ttype, stype)) ; // T(i,j) = (ttype) Sx
ASSERT (GB_Type_compatible (ytype, stype)) ; // y = (ytype) Sx
ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j)
ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z
size_t zsize = ztype->size ;
size_t xsize = xtype->size ;
size_t ysize = ytype->size ;
// no typecasting if all 5 types are the same
bool nocasting = (ttype == stype) &&
(ttype == xtype) && (ttype == ytype) && (ttype == ztype) ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
ASSERT_TYPE_OK (stype, "stype for build_factory", GB0) ;
ASSERT_TYPE_OK (xtype, "xtype for build_factory", GB0) ;
ASSERT_TYPE_OK (ytype, "ytype for build_factory", GB0) ;
ASSERT_TYPE_OK (ztype, "ztype for build_factory", GB0) ;
//--------------------------------------------------------------------------
// STEP 5: assemble the tuples
//--------------------------------------------------------------------------
bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ;
if (copy_S_into_T && S_work != NULL)
{
//----------------------------------------------------------------------
// transplant S_work into T->x
//----------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to copy Sx
// into Tx. Sx can be directly transplanted into T->x since Sx is
// provided as S_work. GB_builder must either transplant or free
// S_work. The transplant can be used by GB_wait, whenever the tuples
// are already sorted, with no duplicates, and no typecasting is
// needed, since S_work is always A->Pending->x. T and Sx may be iso
// or non-iso.
T->x = S_work ; T->x_size = (*S_work_size_handle) ;
S_work = NULL ;
(*S_work_handle) = NULL ;
(*S_work_size_handle) = 0 ;
int64_t tx_size_required = tnz * tsize ;
if (2 * tx_size_required < T->x_size)
{
// shrink the size of T->x
bool ok = true ;
GB_REALLOC (T->x, tx_size_required, GB_void, &(T->x_size), &ok,
Context) ;
}
}
else
{
//----------------------------------------------------------------------
// allocate T->x
//----------------------------------------------------------------------
T->x = GB_XALLOC (false, S_iso, tnz, tsize, &(T->x_size)) ; // x:OK
if (T->x == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_void *restrict Tx = (GB_void *) T->x ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
if (nvals == 0)
{
// nothing to do
}
else if (copy_S_into_T)
{
//------------------------------------------------------------------
// copy Sx into T->x
//------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to
// copy Sx into Tx. Sx cannot be transplanted into T->x since
// S_work is NULL and S_input cannot be modified by GB_builder.
ASSERT (S_work == NULL) ;
ASSERT (Sx == S_input) ;
GB_memcpy (Tx, Sx, (S_iso ? 1 : nvals) * tsize, nthreads) ;
}
else if (nocasting)
{
//------------------------------------------------------------------
// assemble the values, Sx, into T, no typecasting needed
//------------------------------------------------------------------
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled.
// There are 44 common cases of this function for built-in types
// and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types
// (all but boolean); and OR, AND, XOR, and EQ for boolean.
// In addition, the FIRST and SECOND operators are hard-coded, for
// another 22 workers, since SECOND is used by GB_wait and since
// FIRST is useful for keeping the first tuple seen. It is
// controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they
// do not appear in GB_reduce_to_* where the FIRST and SECOND
// operators are not needed.
// Early exit cannot be exploited, so the terminal is ignored.
bool done = false ;
if (S_iso)
{
//--------------------------------------------------------------
// T and Sx are iso; set iso value and delete duplicates
//--------------------------------------------------------------
memcpy (Tx, Sx, tsize) ;
#define GB_ISO_BUILD
#include "GB_reduce_build_template.c"
done = true ;
}
else
{
//--------------------------------------------------------------
// T and Sx are not iso; call in the workers
//--------------------------------------------------------------
#ifndef GBCOMPACT
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_INCLUDE_SECOND_OPERATOR
#define GB_red(opname,aname) \
GB (_red_build_ ## opname ## aname)
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, Ti, \
(atype *) Sx, nvals, ndupl, I_work, K_work, \
tstart_slice, tnz_slice, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
// controlled by opcode and typecode
GB_Type_code typecode = tcode ;
#include "GB_red_factory.c"
#endif
}
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
if (do_burble) GBURBLE ("(generic build) ") ;
//--------------------------------------------------------------
// no typecasting, but use the fdup function pointer and memcpy
//--------------------------------------------------------------
// Either the fdup operator or type of Sx and T are
// user-defined, or fdup is not an associative operator handled
// by the GB_red_factory, or some combination of these
// conditions. User-defined types cannot be typecasted, so
// this handles all user-defined types.
// Tx [p] = (ttype) Sx [k], but with no typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
memcpy (Tx +((p)*tsize), Sx +((k)*tsize), tsize) ;
if (op_2nd)
{
//----------------------------------------------------------
// dup is the SECOND operator, with no typecasting
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op and no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//----------------------------------------------------------
// dup is another operator, with no typecasting needed
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but with no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
fdup (Tx +((p)*tsize), Tx +((p)*tsize), Sx+((k)*tsize));
#include "GB_reduce_build_template.c"
}
}
}
else
{
//------------------------------------------------------------------
// assemble the values Sx into T, typecasting as needed
//------------------------------------------------------------------
if (do_burble)
{
GBURBLE ("(generic build with typecast) ") ;
}
// If T and Sx are iso, no typecasting is ever done, so this method
// is not used in that case.
ASSERT (!S_iso) ;
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled. Not all of the 5 types are
// the same, but all of them are built-in since user-defined types
// cannot be typecasted.
const GB_Type_code scode = stype->code ;
const size_t ssize = stype->size ;
GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ;
GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ;
GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ;
GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ;
ASSERT (scode <= GB_FC64_code) ;
ASSERT (tcode <= GB_FC64_code) ;
ASSERT (xcode <= GB_FC64_code) ;
ASSERT (ycode <= GB_FC64_code) ;
ASSERT (zcode <= GB_FC64_code) ;
// Tx [p] = (ttype) Sx [k], with typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
cast_S_to_T (Tx +((p)*tsize), Sx +((k)*ssize), ssize) ;
if (op_2nd)
{
//--------------------------------------------------------------
// dup operator is the SECOND operator, with typecasting
//--------------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op, with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//--------------------------------------------------------------
// dup is another operator, with typecasting required
//--------------------------------------------------------------
// Tx [p] += Sx [k], with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
{ \
/* ywork = (ytype) Sx [k] */ \
GB_void ywork [GB_VLA(ysize)] ; \
cast_S_to_Y (ywork, Sx +((k)*ssize), ssize) ; \
/* xwork = (xtype) Tx [p] */ \
GB_void xwork [GB_VLA(xsize)] ; \
cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \
/* zwork = f (xwork, ywork) */ \
GB_void zwork [GB_VLA(zsize)] ; \
fdup (zwork, xwork, ywork) ; \
/* Tx [tnz-1] = (ttype) zwork */ \
cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \
}
#include "GB_reduce_build_template.c"
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
T->jumbled = false ;
ASSERT_MATRIX_OK (T, "T built", GB0) ;
ASSERT (GB_IS_HYPERSPARSE (T)) ;
return (GrB_SUCCESS) ;
}
|
requantize_relu_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Fused requantize + ReLU for pack4 int32 data on MIPS MSA.
//
// Converts the int32 accumulators in bottom_blob back to int8 in top_blob,
// folding the dequantize scale, optional bias, ReLU, and requantize scale
// into a single multiply(-add) per element (see the identity comments in
// the body). The input is elempack=4; the output elempack is either 8
// (two input channels are interleaved into one output channel) or 1
// (each of the 4 lanes is scattered to its own output channel).
//
// Parameters:
//   bottom_blob     - int32 input, elempack 4
//   top_blob        - int8 output, elempack 8 or 1 (pre-allocated by caller)
//   scale_in_data   - dequantize scale, broadcast if w == 1, else per-channel
//   scale_out_data  - requantize scale, broadcast if w == 1, else per-channel
//   bias_data       - optional bias (w == 0 means no bias), broadcast if w == 1
//   opt             - ncnn options (thread count)
//
// NOTE(review): float2int8relu is a project helper defined elsewhere —
// presumably it rounds, clamps negatives to 0 (ReLU) and saturates to
// int8; confirm against its definition.
static void requantize_relu_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;
    int outc = top_blob.c;
    int out_elempack = top_blob.elempack;

    // w == 1 means a single broadcast value; otherwise per-channel data
    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // Algebraic folding applied below:
    // int8(relu(v * scale_in) * scale_out)
    // int8_relu(v * (scale_in * scale_out))

    // int8(relu(v * scale_in + bias) * scale_out)
    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))

    if (out_elempack == 8)
    {
        // pack4 -> pack8: output channel q consumes input channels 2q and 2q+1
        if (bias_data_size == 0)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < outc; q++)
            {
                const int* intptr0 = bottom_blob.channel(q * 2);
                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
                signed char* ptr = top_blob.channel(q);

                // load (or broadcast) the scales for the two pack4 halves,
                // then fold scale_in * scale_out once per channel
                v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
                v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
                v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
                v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
                v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
                v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);

                int i = 0;
                // main loop: 4 spatial positions per iteration (16 ints per input stream)
                for (; i + 3 < size; i += 4)
                {
                    __builtin_prefetch(intptr0 + 64);
                    __builtin_prefetch(intptr1 + 64);
                    // int32 -> float conversion of 4 pack4 groups per stream
                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
                    v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
                    v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
                    v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
                    v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
                    _v00 = __msa_fmul_w(_v00, _scale0);
                    _v01 = __msa_fmul_w(_v01, _scale0);
                    _v02 = __msa_fmul_w(_v02, _scale0);
                    _v03 = __msa_fmul_w(_v03, _scale0);
                    _v10 = __msa_fmul_w(_v10, _scale1);
                    _v11 = __msa_fmul_w(_v11, _scale1);
                    _v12 = __msa_fmul_w(_v12, _scale1);
                    _v13 = __msa_fmul_w(_v13, _scale1);
                    // interleave the two halves into 8 int8 values per position
                    *((int64_t*)ptr) = float2int8relu(_v00, _v10);
                    *((int64_t*)(ptr + 8)) = float2int8relu(_v01, _v11);
                    *((int64_t*)(ptr + 16)) = float2int8relu(_v02, _v12);
                    *((int64_t*)(ptr + 24)) = float2int8relu(_v03, _v13);

                    intptr0 += 16;
                    intptr1 += 16;
                    ptr += 32;
                }
                // scalar-position tail: one pack4 group per stream per iteration
                for (; i < size; i++)
                {
                    __builtin_prefetch(intptr0 + 16);
                    __builtin_prefetch(intptr1 + 16);
                    v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
                    v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
                    _v0 = __msa_fmul_w(_v0, _scale0);
                    _v1 = __msa_fmul_w(_v1, _scale1);
                    *((int64_t*)ptr) = float2int8relu(_v0, _v1);

                    intptr0 += 4;
                    intptr1 += 4;
                    ptr += 8;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < outc; q++)
            {
                const int* intptr0 = bottom_blob.channel(q * 2);
                const int* intptr1 = bottom_blob.channel(q * 2 + 1);
                signed char* ptr = top_blob.channel(q);

                v4f32 _scale_in0 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8, 0);
                v4f32 _scale_in1 = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 8 + 4, 0);
                v4f32 _scale_out0 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8, 0);
                v4f32 _scale_out1 = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 8 + 4, 0);
                v4f32 _bias0 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8, 0);
                v4f32 _bias1 = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 8 + 4, 0);

                // fold scales and pre-scale the bias (bias * scale_out)
                v4f32 _scale0 = __msa_fmul_w(_scale_in0, _scale_out0);
                v4f32 _scale1 = __msa_fmul_w(_scale_in1, _scale_out1);
                _bias0 = __msa_fmul_w(_bias0, _scale_out0);
                _bias1 = __msa_fmul_w(_bias1, _scale_out1);

                int i = 0;
                // main loop: 4 spatial positions per iteration
                for (; i + 3 < size; i += 4)
                {
                    __builtin_prefetch(intptr0 + 64);
                    __builtin_prefetch(intptr1 + 64);
                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
                    v4f32 _v02 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 8, 0));
                    v4f32 _v03 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 12, 0));
                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
                    v4f32 _v12 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 8, 0));
                    v4f32 _v13 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 12, 0));
                    // fused multiply-add: v * scale + bias
                    _v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
                    _v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
                    _v02 = __msa_fmadd_w(_bias0, _v02, _scale0);
                    _v03 = __msa_fmadd_w(_bias0, _v03, _scale0);
                    _v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
                    _v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
                    _v12 = __msa_fmadd_w(_bias1, _v12, _scale1);
                    _v13 = __msa_fmadd_w(_bias1, _v13, _scale1);
                    *((int64_t*)ptr) = float2int8relu(_v00, _v10);
                    *((int64_t*)(ptr + 8)) = float2int8relu(_v01, _v11);
                    *((int64_t*)(ptr + 16)) = float2int8relu(_v02, _v12);
                    *((int64_t*)(ptr + 24)) = float2int8relu(_v03, _v13);

                    intptr0 += 16;
                    intptr1 += 16;
                    ptr += 32;
                }
                // 2-position tail
                for (; i + 1 < size; i += 2)
                {
                    __builtin_prefetch(intptr0 + 32);
                    __builtin_prefetch(intptr1 + 32);
                    v4f32 _v00 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
                    v4f32 _v01 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0 + 4, 0));
                    v4f32 _v10 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
                    v4f32 _v11 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1 + 4, 0));
                    _v00 = __msa_fmadd_w(_bias0, _v00, _scale0);
                    _v01 = __msa_fmadd_w(_bias0, _v01, _scale0);
                    _v10 = __msa_fmadd_w(_bias1, _v10, _scale1);
                    _v11 = __msa_fmadd_w(_bias1, _v11, _scale1);
                    *((int64_t*)ptr) = float2int8relu(_v00, _v10);
                    *((int64_t*)(ptr + 8)) = float2int8relu(_v01, _v11);

                    intptr0 += 8;
                    intptr1 += 8;
                    ptr += 16;
                }
                // 1-position tail
                for (; i < size; i++)
                {
                    __builtin_prefetch(intptr0 + 16);
                    __builtin_prefetch(intptr1 + 16);
                    v4f32 _v0 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr0, 0));
                    v4f32 _v1 = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr1, 0));
                    _v0 = __msa_fmadd_w(_bias0, _v0, _scale0);
                    _v1 = __msa_fmadd_w(_bias1, _v1, _scale1);
                    *((int64_t*)ptr) = float2int8relu(_v0, _v1);

                    intptr0 += 4;
                    intptr1 += 4;
                    ptr += 8;
                }
            }
        }
    }

    if (out_elempack == 1)
    {
        // pack4 -> pack1: each of the 4 lanes of input channel q is written
        // to its own output channel (4q .. 4q+3)
        if (bias_data_size == 0)
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const int* intptr = bottom_blob.channel(q);
                signed char* ptr0 = top_blob.channel(q * 4);
                signed char* ptr1 = top_blob.channel(q * 4 + 1);
                signed char* ptr2 = top_blob.channel(q * 4 + 2);
                signed char* ptr3 = top_blob.channel(q * 4 + 3);

                v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
                v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
                v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);

                int i = 0;
                for (; i < size; i++)
                {
                    __builtin_prefetch(intptr + 16);
                    v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
                    _v = __msa_fmul_w(_v, _scale);
                    // scatter the 4 quantized lanes to 4 separate channels
                    v16i8 v = float2int8relu(_v);
                    ptr0[0] = v[0];
                    ptr1[0] = v[1];
                    ptr2[0] = v[2];
                    ptr3[0] = v[3];

                    intptr += 4;
                    ptr0 += 1;
                    ptr1 += 1;
                    ptr2 += 1;
                    ptr3 += 1;
                }
            }
        }
        else
        {
            #pragma omp parallel for num_threads(opt.num_threads)
            for (int q = 0; q < channels; q++)
            {
                const int* intptr = bottom_blob.channel(q);
                signed char* ptr0 = top_blob.channel(q * 4);
                signed char* ptr1 = top_blob.channel(q * 4 + 1);
                signed char* ptr2 = top_blob.channel(q * 4 + 2);
                signed char* ptr3 = top_blob.channel(q * 4 + 3);

                v4f32 _scale_in = scale_in_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_in_data[0]) : (v4f32)__msa_ld_w((const float*)scale_in_data + q * 4, 0);
                v4f32 _scale_out = scale_out_data_size == 1 ? (v4f32)__msa_fill_w_f32(scale_out_data[0]) : (v4f32)__msa_ld_w((const float*)scale_out_data + q * 4, 0);
                v4f32 _bias = bias_data_size == 1 ? (v4f32)__msa_fill_w_f32(bias_data[0]) : (v4f32)__msa_ld_w((const float*)bias_data + q * 4, 0);
                // fold scales and pre-scale the bias (bias * scale_out)
                v4f32 _scale = __msa_fmul_w(_scale_in, _scale_out);
                _bias = __msa_fmul_w(_bias, _scale_out);

                int i = 0;
                for (; i < size; i++)
                {
                    __builtin_prefetch(intptr + 16);
                    v4f32 _v = (v4f32)__msa_ffint_s_w(__msa_ld_w(intptr, 0));
                    _v = __msa_fmadd_w(_bias, _v, _scale);
                    v16i8 v = float2int8relu(_v);
                    ptr0[0] = v[0];
                    ptr1[0] = v[1];
                    ptr2[0] = v[2];
                    ptr3[0] = v[3];

                    intptr += 4;
                    ptr0 += 1;
                    ptr1 += 1;
                    ptr2 += 1;
                    ptr3 += 1;
                }
            }
        }
    }
}
|
Vec.h | #ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University
Vec.h
Class for a constant-length vector
Supports the following operations:
vec v1; // Initialized to (0,0,0)
vec v2(1,2,3); // Initialized to (1,2,3)
vec v3(v2); // Copy constructor
float farray[3];
vec v4 = vec(farray); // Explicit: "v4 = farray" won't work
Vec<3,double> vd; // The "vec" used above is Vec<3,float>
point p1, p2, p3; // Same as vec
v3 = v1 + v2; // Also -, *, / (all componentwise)
v3 = 3.5f * v1; // Also vec * scalar, vec / scalar
// NOTE: scalar has to be the same type:
// it won't work to do double * vec<float>
v1 = min(v2,v3); // Componentwise min/max
v1 = sin(v2); // Componentwise - all the usual functions...
swap(v1,v2); // In-place swap
v3 = v1 DOT v2; // Actually operator^
v3 = v1 CROSS v2; // Actually operator%
float f = v1[0]; // Subscript
float *fp = v1; // Implicit conversion to float *
f = len(v1); // Length (also len2 == squared length)
f = dist(p1, p2); // Distance (also dist2 == squared distance)
normalize(v1); // Normalize (i.e., make it unit length)
// normalize(vec(0,0,0)) => vec(1,0,0)
v1 = trinorm(p1,p2,p3); // Normal of triangle
cout << v1 << endl; // iostream output in the form (1,2,3)
cin >> v2; // iostream input using the same syntax
Also defines the utility functions sqr, cube, sgn, fract, clamp, mix,
step, smoothstep, faceforward, reflect, and refract
*/
// Windows defines these as macros, which prevents us from using the
// type-safe versions from std::, as well as interfering with method defns
#undef min
#undef max
#include <cmath>
#include <iostream>
#include <algorithm>
using std::min;
using std::max;
using std::swap;
using std::sqrt;
// Let gcc optimize conditional branches a bit better...
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
# define likely(x) (x)
# define unlikely(x) (x)
# else
# define likely(x) (__builtin_expect((x), 1))
# define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif
// Utility functions for square and cube, to go along with sqrt and cbrt
// Square of a value - companion to sqrt, mirroring the cbrt/cube pairing.
template <class T>
static inline T sqr(const T &x)
{
	T product = x * x;
	return product;
}
// Boost-like compile-time assertion checking: the template is declared but
// only specialized for true, so VEC_STATIC_CHECK(expr) compiles exactly when
// expr is true and produces an "incomplete type" error otherwise.
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
	{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()
// Constant-length vector of D components of type T (default float).
// Storage is a bare T[D], so a Vec converts to/from a raw pointer and has
// no hidden overhead.
template <int D, class T = float>
class Vec {
protected:
	T v[D];   // the D components

public:
	// Constructor for no arguments. Everything initialized to 0.
	Vec() { for (int i = 0; i < D; i++) v[i] = T(0); }

	// Uninitialized constructor - meant mostly for internal use
#define VEC_UNINITIALIZED ((void *) 0)
	Vec(void *) {}

	// Constructors for 2-4 arguments (dimension checked at compile time)
	Vec(T x, T y)
		{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
	Vec(T x, T y, T z)
		{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
	Vec(T x, T y, T z, T w)
		{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }

	// Constructor from anything that can be accessed using []
	// Pretty aggressive, so marked as explicit.
	template <class S> explicit Vec(const S &x)
		{ for (int i = 0; i < D; i++) v[i] = T(x[i]); }

	// No destructor or assignment operator needed

	// Array reference and conversion to pointer - no bounds checking
	const T &operator [] (int i) const
		{ return v[i]; }
	T &operator [] (int i)
		{ return v[i]; }
	operator const T * () const
		{ return v; }
	operator const T * ()
		{ return v; }
	operator T * ()
		{ return v; }

	// Member operators: componentwise compound assignment.
	// The "#pragma omp atomic" makes each single-component update atomic
	// when the caller is inside an OpenMP parallel region; it has no
	// effect when OpenMP is disabled.  Note it does NOT make the whole
	// D-component update atomic.
	Vec<D,T> &operator += (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] += x[i];
		return *this;
	}
	Vec<D,T> &operator -= (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] -= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const T &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x;
		return *this;
	}
	Vec<D,T> &operator /= (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x[i];
		return *this;
	}
	Vec<D,T> &operator /= (const T &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x;
		return *this;
	}

	// Set each component to min/max of this and the other vector.
	// "#pragma omp critical" serializes the whole update under OpenMP
	// (heavier than atomic, but the conditional store needs it).
	Vec<D,T> &min(const Vec<D,T> &x)
	{
#pragma omp critical
		for (int i = 0; i < D; i++)
			if (x[i] < v[i]) v[i] = x[i];
		return *this;
	}
	Vec<D,T> &max(const Vec<D,T> &x)
	{
#pragma omp critical
		for (int i = 0; i < D; i++)
			if (x[i] > v[i]) v[i] = x[i];
		return *this;
	}

	// Outside of class: + - * / % ^ << >>

	// Some partial compatibility with valarrays and vectors
	typedef T value_type;
	size_t size() const
		{ return D; }
	T sum() const
		{ T total = v[0];
		  for (int i = 1; i < D; i++) total += v[i];
		  return total; }
	T avg() const
		{ return sum() / D; }
	T product() const
		{ T total = v[0];
		  for (int i = 1; i < D; i++) total *= v[i];
		  return total; }
	T min() const
		{ T m = v[0];
		  for (int i = 1; i < D; i++)
			if (v[i] < m) m = v[i];
		  return m; }
	T max() const
		{ T m = v[0];
		  for (int i = 1; i < D; i++)
			if (v[i] > m) m = v[i];
		  return m; }
	T *begin() { return &(v[0]); }
	const T *begin() const { return &(v[0]); }
	T *end() { return begin() + D; }
	const T *end() const { return begin() + D; }
	void clear() { for (int i = 0; i < D; i++) v[i] = T(0); }
	// empty() == true iff every component is zero (valarray-style test,
	// not a size test - a Vec always has D elements)
	bool empty() const
		{ for (int i = 0; i < D; i++)
			if (v[i]) return false;
		  return true; }
	// Apply a scalar function to every component, returning a new Vec
	Vec<D,T> apply(T func(T)) const
		{ Vec<D,T> result(VEC_UNINITIALIZED);
		  for (int i = 0; i < D; i++) result[i] = func(v[i]);
		  return result; }
	Vec<D,T> apply(T func(const T&)) const
		{ Vec<D,T> result(VEC_UNINITIALIZED);
		  for (int i = 0; i < D; i++) result[i] = func(v[i]);
		  return result; }
};
// Convenience typedefs: "vec" and "point" are both 3-float vectors; the
// GLSL-style names vecN / ivecN give float and int variants of each size.
typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;
// Nonmember operators that take two Vecs
// Componentwise binary arithmetic on two Vecs of the same dimension.
// Each operator fills an uninitialized result vector element by element.
template <int D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] + v2[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] - v2[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] * v2[k];
	return out;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> out(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		out[k] = v1[k] / v2[k];
	return out;
}
// Dot product in any dimension.  Spelled "v1 DOT v2" in client code; note
// operator^ has very low precedence, so parenthesize: (a DOT b) * c.
template <int D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T sum = v1[0] * v2[0];
	for (int i = 1; i < D; i++)
		sum += v1[i] * v2[i];
	return sum;
}
#define DOT ^

// Cross product - only in 3 dimensions (spelled "v1 CROSS v2")
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
			v1[2]*v2[0] - v1[0]*v2[2],
			v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %
// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
// Exact componentwise equality test (usual caveats about comparing
// floating-point values for equality apply).
template <int D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	for (int k = 0; k < D; k++) {
		if (!(v1[k] == v2[k]))
			return false;
	}
	return true;
}
// Componentwise inequality: true iff any component differs.
template <int D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	bool differ = false;
	for (int k = 0; k < D && !differ; k++)
		differ = (v1[k] != v2[k]);
	return differ;
}
// Unary operators: +v is the identity (returned by reference), -v negates
// componentwise, and !v tests for the all-zero vector via empty().
template <int D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
	return v;
}
template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = -v[i];
	return result;
}
template <int D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
	return v.empty();
}
// Vec/scalar operators.  The scalar must be exactly T: e.g.
// double * Vec<3,float> will NOT match these templates (see file header).
template <int D, class T>
static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = x * v[i];
	return result;
}
template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v[i] * x;
	return result;
}
// scalar / vec divides the scalar by each component (GLSL-style)
template <int D, class T>
static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = x / v[i];
	return result;
}
template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v[i] / x;
	return result;
}
// iostream operators: output in the form "(1, 2, 3)"; input accepts the
// same syntax with either ( ) or [ ] delimiters.
template <int D, class T>
static inline std::ostream &operator << (std::ostream &os, const Vec<D,T> &v)
{
	os << "(";
	for (int i = 0; i < D-1; i++)
		os << v[i] << ", ";
	return os << v[D-1] << ")";
}
template <int D, class T>
static inline std::istream &operator >> (std::istream &is, Vec<D,T> &v)
{
	// c1 holds the opening delimiter, c2 the separator/closing character
	// after each component; failbit is set on any syntax mismatch.
	char c1 = 0, c2 = 0;

	is >> c1;
	if (c1 == '(' || c1 == '[') {
		is >> v[0] >> std::ws >> c2;
		for (int i = 1; i < D; i++) {
			if (c2 == ',')
				is >> v[i] >> std::ws >> c2;
			else
				is.setstate(std::ios::failbit);
		}
	}

	// closing delimiter must match the opening one
	if (c1 == '(' && c2 != ')')
		is.setstate(std::ios::failbit);
	else if (c1 == '[' && c2 != ']')
		is.setstate(std::ios::failbit);

	return is;
}
// Functions on Vecs
template <int D, class T>
static inline void swap(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
for (int i = 0; i < D; i++)
swap(v1[i], v2[i]);
}
// Squared length of a vector.
template <int D, class T>
static inline const T len2(const Vec<D,T> &v)
{
	T acc = v[0] * v[0];
	for (int k = 1; k < D; k++)
		acc += v[k] * v[k];
	return acc;
}
// Euclidean length.
template <int D, class T>
static inline const T len(const Vec<D,T> &v)
{
	T squared = len2(v);
	return sqrt(squared);
}
// Squared distance between two points.
template <int D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T acc = sqr(v2[0]-v1[0]);
	for (int k = 1; k < D; k++)
		acc += sqr(v2[k]-v1[k]);
	return acc;
}
// Euclidean distance between two points.
template <int D, class T>
static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T squared = dist2(v1, v2);
	return sqrt(squared);
}
// Normalize v in place to unit length and return it.
// A zero (or negative-length, which cannot happen) vector becomes (1,0,...,0).
template <int D, class T>
static inline Vec<D,T> normalize(Vec<D,T> &v)
{
	T length = len(v);
	if (unlikely(length <= T(0))) {
		// Degenerate input: documented fallback is the unit x vector
		v[0] = T(1);
		for (int k = 1; k < D; k++)
			v[k] = T(0);
		return v;
	}

	T inv = T(1) / length;
	for (int k = 0; k < D; k++)
		v[k] *= inv;
	return v;
}
// Area-weighted triangle face normal: 0.5 * (v1-v0) x (v2-v0).
// Here T is the vector type itself (e.g. vec), not the scalar type.
template <class T>
static inline T trinorm(const T &v0, const T &v1, const T &v2)
{
	return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0));
}
// Cube of a value, companion to cbrt.
template <class T>
static inline T cube(const T &x)
{
	T x2 = x * x;
	return x2 * x;
}
// Sign of a scalar: -1 for negative, +1 otherwise (including zero).
template <class T>
static inline T sgn(const T &x)
{
	if (x < T(0))
		return T(-1);
	return T(1);
}
// Utility functions based on GLSL
// Fractional part: x - floor(x).
template <class T>
static inline T fract(const T &x)
{
	T whole = floor(x);
	return x - whole;
}
// Clamp x to [a,b].  The comparison order means a is returned on NaN input.
template <class T>
static inline T clamp(const T &x, const T &a, const T &b)
{
	if (x > a)
		return (x < b) ? x : b;
	return a;   // includes the NaN path
}
// Linear interpolation between x and y by weight a.
template <class T, class S>
static inline T mix(const T &x, const T &y, const S &a)
{
	S w = S(1) - a;
	return w * x + a * y;
}
// 0 below threshold a, 1 at or above it.
template <class T>
static inline T step(const T &x, const T &a)
{
	if (x < a)
		return T(0);
	return T(1);
}
// Hermite smooth ramp from a to b; degenerates to step when b <= a.
template <class T>
static inline T smoothstep(const T &x, const T &a, const T &b)
{
	if (b <= a)
		return step(x, a);
	T u = (x - a) / (b - a);
	if (u <= T(0))
		return T(0);
	if (u >= T(1))
		return T(1);
	return u * u * (T(3) - T(2) * u);
}
// GLSL-style geometric helpers.
// BUG FIX: all three functions were declared to return the scalar T, but
// every return statement produces a Vec<D,T>; they would not compile when
// instantiated.  They now return Vec<D,T>, matching their GLSL namesakes
// (no working caller could have depended on the old signatures).

// Flip N so it faces against incident vector I, judged by reference Nref.
template <int D, class T>
static inline Vec<D,T> faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
				   const Vec<D,T> &Nref)
{
	return ((Nref DOT I) < T(0)) ? N : -N;
}
// Reflect I about the (unit-length) normal N.
template <int D, class T>
static inline Vec<D,T> reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
	return I - (T(2) * (N DOT I)) * N;
}
// Refract I through a surface with (unit-length) normal N and relative
// index of refraction eta; returns the zero vector on total internal
// reflection.  BUG FIX: the N coefficient in the GLSL formula is
// eta*NdotI + sqrt(k), not eta*NdotI*sqrt(k).
template <int D, class T>
static inline Vec<D,T> refract(const Vec<D,T> &I, const Vec<D,T> &N,
			       const T &eta)
{
	T NdotI = N DOT I;
	T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI));
	if (k < T(0))
		return Vec<D,T>();
	return eta * I - (eta * NdotI + sqrt(k)) * N;
}
// Generic macros for declaring 1-, 2-, and 3- argument
// componentwise functions on vecs.
// Each expands to template function(s) applying `name` to every component;
// the TWOARG/THREEARG forms also provide scalar-broadcast overloads.
#define VEC_DECLARE_ONEARG(name) \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i]); \
	return result; \
 }
#define VEC_DECLARE_TWOARG(name) \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const T &w) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w); \
	return result; \
 } \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w[i]); \
	return result; \
 }
#define VEC_DECLARE_THREEARG(name) \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const T &w, const T &x) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w, x); \
	return result; \
 } \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w, const Vec<D,T> &x) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w[i], x[i]); \
	return result; \
 }
// Componentwise versions of the usual <cmath> functions, plus the local
// helpers defined above (sqr, cube, sgn, step, smoothstep, clamp).
VEC_DECLARE_ONEARG(fabs)
VEC_DECLARE_ONEARG(floor)
VEC_DECLARE_ONEARG(ceil)
VEC_DECLARE_ONEARG(round)
VEC_DECLARE_ONEARG(trunc)
VEC_DECLARE_ONEARG(sin)
VEC_DECLARE_ONEARG(asin)
VEC_DECLARE_ONEARG(cos)
VEC_DECLARE_ONEARG(acos)
VEC_DECLARE_ONEARG(tan)
VEC_DECLARE_ONEARG(atan)
VEC_DECLARE_ONEARG(exp)
VEC_DECLARE_ONEARG(log)
VEC_DECLARE_ONEARG(sqrt)
VEC_DECLARE_ONEARG(sqr)
VEC_DECLARE_ONEARG(cbrt)
VEC_DECLARE_ONEARG(cube)
VEC_DECLARE_ONEARG(sgn)
VEC_DECLARE_TWOARG(min)
VEC_DECLARE_TWOARG(max)
VEC_DECLARE_TWOARG(atan2)
VEC_DECLARE_TWOARG(pow)
VEC_DECLARE_TWOARG(fmod)
VEC_DECLARE_TWOARG(step)
VEC_DECLARE_THREEARG(smoothstep)
VEC_DECLARE_THREEARG(clamp)
// The macros are for this header's use only
#undef VEC_DECLARE_ONEARG
#undef VEC_DECLARE_TWOARG
#undef VEC_DECLARE_THREEARG
// Both valarrays and GLSL use abs() on a vector to mean fabs().
// Let's be compatible...
// Componentwise absolute value; both valarrays and GLSL spell this abs().
template <int D, class T>
static inline Vec<D,T> abs(const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int k = 0; k < D; k++)
		result[k] = fabs(v[k]);
	return result;
}
#endif
|
guess2.c | #include "math.h"
#include <stdio.h>
#include <stdlib.h>
#define _DISP
//#define _LABEL
#define EXP 2
/* One candidate 4-digit code for the bulls-and-cows solver. */
struct number{
	int num[4];   /* the four decimal digits, most significant first */
	int flag;     /* 1 while still consistent with every answer received */
};
//double LABEL[13]={360,1440,1260,264,9,480,720,216,8,180,72,6,24};
struct number initarray[5040];
/*
 * Split num (expected range 0..9999) into its four decimal digits,
 * most significant first, into p[0..3]; missing leading digits are 0.
 * FIXES: declared `static inline` (a plain C99 `inline` definition emits
 * no external symbol and can fail to link); removed the dead `i=3;` store;
 * bounded the digit loop so a >4-digit input cannot write before p.
 */
static inline void num2p(int num,int *p){
	int i;
	for(i=0;i<4;i++)
		*(p++)=0;
	/* p is now one past the end; peel digits off backwards */
	for(i=0;i<4 && num;i++){
		*(--p)=num%10;
		num=num/10;
	}
}
/*
 * Return 1 if the four digits in p are pairwise distinct, 0 otherwise.
 * FIX: declared `static inline` so a definition is always emitted
 * (plain C99 `inline` alone can leave the call unresolved at link time).
 */
static inline int check1(int * p){
	int i,j;
	for(i=0;i<4;i++){
		for(j=i+1;j<4;j++){
			if(p[i]==p[j])
				return 0;
		}
	}
	return 1;
}
/* Build the global candidate list: every code from 0123 to 9876 whose four
 * digits (with leading zeros) are pairwise distinct - 5040 in total.
 * Fills initarray[] and marks each entry live (flag = 1). */
void PreInitArray(){
	int i,j;
	int cnt=0;           /* next free slot in initarray */
	int numt[4];
	for(i=123;i<=9876;i++){
		num2p(i,numt);
		if(check1(numt)){
			initarray[cnt].flag=1;
			for(j=0;j<4;j++)
			{
				initarray[cnt].num[j]=numt[j];
			}
			cnt++;
		}
	}
#ifdef _LABEL
	/* NOTE(review): LABEL[] is commented out at file scope, so building
	 * with _LABEL defined will not compile as-is. */
	for(i=0;i<13;i++)
	{
		LABEL[i]= pow( LABEL[i] ,1.85 ) ;
		printf("%9f",LABEL[i]);
	}
#endif
	printf("\nPre Iint Over!\n");   /* sic: typo kept - runtime string */
}
/* Reset a per-game working array to a fresh copy of the precomputed
 * candidate list, with every candidate marked live. */
void InitArray(struct number * nump){
	int idx,d;
	for(idx=0;idx<5040;idx++){
		for(d=0;d<4;d++)
			nump[idx].num[d]=initarray[idx].num[d];
		nump[idx].flag=1;
	}
}
/*
 * Classic bulls-and-cows score of guess numg against secret num0.
 * *a = digits correct in value AND position ("bulls");
 * *b = digits present in both codes but misplaced ("cows").
 * Assumes each code has pairwise-distinct digits (guaranteed by check1).
 * FIX: declared `static inline` so a definition is always emitted
 * (plain C99 `inline` alone can leave the call unresolved at link time).
 */
static inline void check2(int * num0,int *numg,int *a,int *b){
	int i,j;
	*a=0;
	*b=0;
	for(i=0;i<4;i++){
		if(num0[i]==numg[i])
			(*a)++;
		/* count all value matches; bulls are subtracted out below */
		for(j=0;j<4;j++){
			if(num0[i]==numg[j])
				(*b)++;
		}
	}
	(*b)-=(*a);
}
/* Score the guess `nump` against all still-live candidates: bucket every
 * live code by its (a,b) response to the guess, then return the sum of
 * pow(bucket_count, EXP) - lower means the guess splits the remaining
 * set more evenly.  The `cnt` parameter is currently unused. */
double Division(struct number * array,double cnt,int *nump){
	/* 13 distinct (a,b) responses, indexed by a*(11-a)/2+b;
	 * size 15 leaves a little slack for the indexing formula */
	int hist[15]={0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0};
	int i;
	int ta,tb;
	for(i=0;i<5040;i++){
		if(array[i].flag){
			check2(array[i].num,nump,&ta,&tb);
			hist[ta*(11-ta)/2+tb]++;
		}
	}
	double div=0;
	double temp;
	for(i=0;i<13;i++){
		if(hist[i]!=0)
		{
			temp=pow( hist[i], EXP);
#ifdef _LABEL
			temp=LABEL[i]*temp*temp;
#endif
			div+=temp;
		}
	}
	return div;
}
/* Choose the next guess: the live candidate whose Division() score is
 * lowest.  If only one candidate remains, return it immediately.
 * Returns an index into array (or -1 if no candidate is live). */
int BestDivision(struct number * array,int count){
	double best=10000*10000+0.0;   /* larger than any possible score */
	int bestindex=-1;
	double new;
	int i;
	double cnt=0.0;
	/* count how many candidates survive */
	for(i=0;i<5040;i++)
		cnt+=array[i].flag;
	if(cnt<1.1){
		/* exactly one left - that must be the answer */
		for(i=0;i<5040;i++){
			if(array[i].flag)
				return i;
		}
	}
	cnt=cnt/13.0;   /* passed to Division, which currently ignores it */
	for(i=0;i<5040;i++){
		if( (array[i].flag))
		{
			new=Division(array,cnt,array[i].num);
			if(best>new){
				best=new;
				bestindex=i;
			}
		}
	}
	return bestindex;
}
/* Play one full game against the secret code `num`.
 * Opens with the guess 0123, then repeatedly filters the candidate set by
 * the (a,b) answer and picks the next guess with BestDivision.
 * Returns the number of guesses used, or 0 if not solved within 9. */
int CCguess(int * num){
	int numg[4];              /* current guess */
	int cnt=0;                /* guesses made so far */
	int i;
	int a,b,ta,tb;
	int ans;
	struct number array[5040];   /* per-game working copy of candidates */
	InitArray(array);
	for(i=0;i<4;i++)
		numg[i]=i;            /* opening guess 0123 */
	while(1){
		check2(num,numg,&a,&b);
		cnt++;
		if(a==4&&b==0)
			return cnt;       /* solved */
		if(cnt>9)
			return 0;         /* give up */
		/* keep only candidates consistent with the answer (a,b) */
		for(i=0;i<5040;i++){
			if(array[i].flag){
				check2(array[i].num,numg,&ta,&tb);
				array[i].flag=(ta==a && tb==b);
			}
		}
		ans=BestDivision(array,cnt);
		for(i=0;i<4;i++)
			numg[i]=array[ans].num[i];
	}
}
/*
 * Driver: solves every one of the 5040 valid codes with CCguess and prints
 * a histogram of how many guesses each secret needed, plus the average.
 * FIXES vs. the original:
 *  - `ans` and the inner `j` were shared across OpenMP threads and
 *    hist[ans]++ raced; they are now per-iteration locals and the
 *    histogram update is atomic.
 *  - returns 0 on success (it returned 1, which signals failure).
 */
int main(){
	PreInitArray();
	int i,j;
	int cnt=0;          /* total guesses over all games */
	int hist[11]={0};   /* hist[k] = number of secrets solved in k guesses */
#pragma omp parallel for
	for(i=0;i<5040;i++){
		int jj;
		int ans=CCguess(initarray[i].num);
#pragma omp atomic
		hist[ans]++;
		for(jj=0;jj<4;jj++)
			printf("%d",initarray[i].num[jj]);
		printf(",%d ",ans);
		if(ans==0){
			printf("\nError!\n");
			exit(1);   /* solver failed on this secret */
		}
		if(i%100==0)
			printf("%5d\n",i);
	}
	printf("time:");
	for(j=1;j<11;j++)
		printf("%5d",j);
	printf("\n ");
	for(j=1;j<11;j++){
		cnt+=hist[j]*j;
		printf("%5d",hist[j]);
	}
	printf("\naverage cnt:%12f\n",cnt/(5040+0.0));
	return 0;
}
|
fox_floats_timer_caching_omp_fileIO_benchmark.c | /* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices
*
* Implementation of parallel matrix multiplication:
* LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$
*
* Input:
* Input Matrix file name: A.dat, B.dat
*
* Output:
* Output Matrix file name: C.dat
* Output Sub-matrices file name: SubMatrices.dat
*
* Notes:
* 1. Assumes the number of processes is a perfect square
* 2. The array member of the matrices is statically allocated
*
* See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI
*/
/* Compiler command:
* mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c
* -o fox_floats_timer_caching_omp_fileIO_benchmark
*
* Run command:
* mpirun -n -4 ./fox_floats_timer_caching_omp
*/
/* Head files */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <mpi.h>
#include <omp.h>
// define problem scale, matrix row/col size
#define PROBLEM_SCALE 1024
// define whether or not Print Matices in the Command Line
#define PRINT_A 0
#define PRINT_B 0
#define PRINT_C 0
#define PRINT_LOCAL_A 0
#define PRINT_LOCAL_B 0
#define PRINT_LOCAL_C 0
// define float precision, 4 byte single-precision float or 8 byte double-precision float
#define FLOAT double
#define FLOAT_MPI MPI_DOUBLE
// Define threads speed-up affnity in the computing
#define NUM_THREADS 16
// Define threads affinity "scatter" or "compact"
#define AFFINITY "KMP_AFFINITY = compact"
/* Type define structure of process grid */
typedef struct {
	int p; /* Total number of processes */
	MPI_Comm comm; /* Communicator for entire grid */
	MPI_Comm row_comm; /* Communicator for my row */
	MPI_Comm col_comm; /* Communicator for my col */
	int q; /* Order of grid (sqrt of p) */
	int my_row; /* My row number */
	int my_col; /* My column number */
	int my_rank; /* My rank in the grid comm */
} GRID_INFO_T;
/* Type define structure of local matrix */
#define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21)
typedef struct {
	int n_bar; /* order of this (square) sub-matrix */
#define Order(A) ((A)->n_bar) // definition with parameters: accessor for the order
	FLOAT entries[MAX]; /* row-major storage, statically sized */
#define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // definition with parameters: element (i,j) via array dereference
} LOCAL_MATRIX_T;
/* Function Declarations */
LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar);
void Free_local_matrix(LOCAL_MATRIX_T** local_A);
void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Read matrix A from a file
void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k)
GRID_INFO_T* grid, int n); // Read matrix B from a file
void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid, int n); // Print matrix A in the command line
void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid, int n); // Print matrix B in the command line
void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Print matrix C in the command line
void Set_to_zero(LOCAL_MATRIX_T* local_A);
void Local_matrix_multiply(LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
void Build_matrix_type(LOCAL_MATRIX_T* local_A);
MPI_Datatype local_matrix_mpi_t;
LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer
void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid);
void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid);
void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B,
GRID_INFO_T* grid);
void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C,
GRID_INFO_T* grid, int n); // Write matrix multiplication to a file
void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix A to a file
void Write_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k)
GRID_INFO_T* grid); // Write local matrix B to a file
void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A,
GRID_INFO_T* grid); // Write local matrix C to a file
/*********************************************************/
main(int argc, char* argv[]) {
FILE *fp;
int p;
int my_rank;
GRID_INFO_T grid;
LOCAL_MATRIX_T* local_A;
LOCAL_MATRIX_T* local_B;
LOCAL_MATRIX_T* local_C;
int n;
int n_bar;
double timer_start;
double timer_end;
int content;
int i;
int j;
void Setup_grid(GRID_INFO_T* grid);
void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A,
LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C);
// Matrix Generator
fp = fopen("A.dat", "w"); // Generate and print matrix A into a file
for (i = 0; i < PROBLEM_SCALE; i++) {
for (j = 0; j < PROBLEM_SCALE; j++)
if(i == j){
fprintf(fp,"%d ", 1);
}
else {
fprintf(fp,"%d ", 0);
}
fprintf(fp,"\n");
}
fclose(fp);
fp = fopen("B.dat", "w"); // Generate and print matrix B into a file
for (i = 0; i < PROBLEM_SCALE; i++){
for (j = 0; j < PROBLEM_SCALE; j++)
fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j);
fprintf(fp, "\n");
}
fclose(fp);
// SPMD Mode start from here (Processess fork from here)
MPI_Init(&argc, &argv); // MPI initializing
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator
// Initial OpenMP Environment
omp_set_num_threads(NUM_THREADS);
kmp_set_defaults(AFFINITY);
Setup_grid(&grid); // Set up Processess grid
if (my_rank == 0) {
fp = fopen("A.dat","r");
n = 0;
while((content = fgetc(fp)) != EOF)
{
//printf("fgetc = %d\n", content);
if(content != 0x20 && content != 0x0A) n++;
}
fclose(fp);
n = (int) sqrt((double) n);
printf("We read the order of the matrices from A.dat is\n %d\n", n);
// while(fgetc(fp) != EOF) n++;
// printf("What's the order of the matrices?\n");
// scanf("%d", &n); // Overall Matrix's Order
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order
n_bar = n/grid.q; // \bar n is the local matrix's order
local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A
Order(local_A) = n_bar; // Local matrix A's order
Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_A == 1)
Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure)
local_B = Local_matrix_allocate(n_bar); // Allocate local matrix
Order(local_B) = n_bar; // Local matrix B's order
Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure)
if (PRINT_B == 1)
Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure)
Build_matrix_type(local_A); // Buid local_A's MPI matrix data type
temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n
local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C
Order(local_C) = n_bar; // Set matrix local_C's order
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
timer_start = MPI_Wtime(); // Get the MPI wall time
Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function
timer_end = MPI_Wtime(); // Get the MPI wall time
MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier
Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
if (PRINT_C == 1)
Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result)
Write_local_matrices_A("Write split of local matrix A into local_A.dat",
local_A, &grid); // Write local matrix A into file
if (PRINT_LOCAL_A == 1)
Print_local_matrices_A("Split of local matrix A",
local_A, &grid); // Print matrix A split in processess
Write_local_matrices_B("Write split of local matrix B into local_B.dat",
local_B, &grid); // Write local matrix B into file, special for row-major storage
if (PRINT_LOCAL_B == 1)
Print_local_matrices_B("Split of local matrix B",
local_B, &grid); // Print matrix B split in processess, special for row-major storage
Write_local_matrices_C("Write split of local matrix C into local_C.dat",
local_C, &grid); // Print matrix C split in processess
if (PRINT_LOCAL_C == 1)
Print_local_matrices_C("Split of local matrix C",
local_C, &grid); // Print matrix C split in processess
Free_local_matrix(&local_A); // Free local matrix local_A
Free_local_matrix(&local_B); // Free local matrix local_B
Free_local_matrix(&local_C); // Free local matrix local_C
if(my_rank == 0)
printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start);
MPI_Finalize(); // MPI finalize, processes join and resource recycle
} /* main */
/*********************************************************/
/* Build the q x q periodic Cartesian process grid (q = sqrt(p)) and the
 * row/column sub-communicators that Fox's algorithm uses. */
void Setup_grid(
	GRID_INFO_T* grid /* out */) {
	int old_rank;
	int dimensions[2];
	int wrap_around[2];
	int coordinates[2];
	int free_coords[2];

	/* Set up Global Grid Information */
	MPI_Comm_size(MPI_COMM_WORLD, &(grid->p));
	MPI_Comm_rank(MPI_COMM_WORLD, &old_rank);

	/* We assume p is a perfect square */ // NOTE(review): a non-square p silently truncates q
	grid->q = (int) sqrt((double) grid->p);
	dimensions[0] = dimensions[1] = grid->q;

	/* We want a circular shift in second dimension. */
	/* Don't care about first */
	wrap_around[0] = wrap_around[1] = 1;
	MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions,
		wrap_around, 1, &(grid->comm));
	MPI_Comm_rank(grid->comm, &(grid->my_rank));
	MPI_Cart_coords(grid->comm, grid->my_rank, 2,
		coordinates);
	grid->my_row = coordinates[0];
	grid->my_col = coordinates[1];

	/* Set up row communicators (row index fixed, column free) */
	free_coords[0] = 0;
	free_coords[1] = 1;
	MPI_Cart_sub(grid->comm, free_coords,
		&(grid->row_comm));

	/* Set up column communicators (column index fixed, row free) */
	free_coords[0] = 1;
	free_coords[1] = 0;
	MPI_Cart_sub(grid->comm, free_coords,
		&(grid->col_comm));
} /* Setup_grid */
/*********************************************************/
/* Fox's algorithm.  In stage s, the process holding A_{i,(i+s) mod q}
 * broadcasts its A block along the grid row, every process multiplies the
 * received block into local_C, and the B blocks shift up one grid row
 * (circularly).  After q stages local_C holds this process's block of A*B.
 * FIX: temp_A was allocated each call and never released - it is now
 * freed before returning (one LOCAL_MATRIX_T leaked per call). */
void Fox(
	int n /* in */,
	GRID_INFO_T* grid /* in */,
	LOCAL_MATRIX_T* local_A /* in */,
	LOCAL_MATRIX_T* local_B /* in */,
	LOCAL_MATRIX_T* local_C /* out */) {
	LOCAL_MATRIX_T* temp_A; /* Storage for the sub-matrix of A
	                           broadcast during the current stage */
	int stage;
	int bcast_root;
	int n_bar; /* n/sqrt(p) */
	int source;
	int dest;
	MPI_Status status;

	n_bar = n/grid->q;
	Set_to_zero(local_C);

	/* Calculate addresses for row circular shift of B */
	source = (grid->my_row + 1) % grid->q;
	dest = (grid->my_row + grid->q - 1) % grid->q;

	/* Set aside storage for the broadcast block of A */
	temp_A = Local_matrix_allocate(n_bar);

	for (stage = 0; stage < grid->q; stage++) {
		bcast_root = (grid->my_row + stage) % grid->q;
		if (bcast_root == grid->my_col) { /* this process owns the stage's A block */
			MPI_Bcast(local_A, 1, local_matrix_mpi_t,
				bcast_root, grid->row_comm);
			Local_matrix_multiply(local_A, local_B,
				local_C);
		} else { /* receive the stage's A block into the temp_A buffer */
			MPI_Bcast(temp_A, 1, local_matrix_mpi_t,
				bcast_root, grid->row_comm);
			Local_matrix_multiply(temp_A, local_B,
				local_C);
		}
		MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, /* single-buffer send+receive */
			dest, 0, source, 0, grid->col_comm, &status); /* circular shift of B up one grid row */
	} /* for */

	Free_local_matrix(&temp_A); /* release per-call scratch block */
} /* Fox */
/*********************************************************/
/* Allocate one LOCAL_MATRIX_T.  `local_order` is unused because `entries`
 * is a fixed MAX-sized array; the parameter is kept for interface
 * compatibility with callers.
 * FIXES: malloc result is checked (the original returned an unchecked,
 * possibly NULL pointer), the redundant cast is gone, and sizeof is taken
 * from the object rather than the type. */
LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) {
	LOCAL_MATRIX_T* temp = malloc(sizeof *temp);
	if (temp == NULL) {
		fprintf(stderr, "Local_matrix_allocate: out of memory\n");
		exit(EXIT_FAILURE);
	}
	return temp;
} /* Local_matrix_allocate */
/*********************************************************/
/* Release a matrix allocated by Local_matrix_allocate.
 * FIX: the caller's pointer is nulled after free, guarding against
 * use-after-free and double free. */
void Free_local_matrix(
	LOCAL_MATRIX_T** local_A_ptr /* in/out */) {
	free(*local_A_ptr);
	*local_A_ptr = NULL;
} /* Free_local_matrix */
/*********************************************************/
/* Read and distribute matrix for matrix A:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
/* Read matrix A from A.dat on process 0 and distribute it: for each global
 * row, each grid column's block of Order(local_A) values is either stored
 * locally (process 0's own blocks) or sent to the owning process.  Other
 * processes receive their rows in order.
 * NOTE(review): fopen/fscanf results are unchecked - a missing or short
 * A.dat is not detected. */
void Read_matrix_A(
	char* prompt /* in */,
	LOCAL_MATRIX_T* local_A /* out */,
	GRID_INFO_T* grid /* in */,
	int n /* in */) {
	FILE *fp;
	int mat_row, mat_col;
	int grid_row, grid_col;
	int dest;
	int coords[2];
	FLOAT* temp;
	MPI_Status status;

	if (grid->my_rank == 0) { /* process 0 reads the file and distributes blocks */
		fp = fopen("A.dat","r");
		temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
		printf("%s\n", prompt);
		fflush(stdout);
		for (mat_row = 0; mat_row < n; mat_row++) {
			grid_row = mat_row/Order(local_A);
			coords[0] = grid_row;
			for (grid_col = 0; grid_col < grid->q; grid_col++) {
				coords[1] = grid_col;
				MPI_Cart_rank(grid->comm, coords, &dest);
				if (dest == 0) {
					/* our own block: read straight into local_A */
					for (mat_col = 0; mat_col < Order(local_A); mat_col++)
						fscanf(fp, "%lf",
							(local_A->entries)+mat_row*Order(local_A)+mat_col);
				} else {
					/* read one block row into temp, then ship it out */
					for(mat_col = 0; mat_col < Order(local_A); mat_col++)
						fscanf(fp,"%lf", temp + mat_col);
					MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0,
						grid->comm);
				}
			}
		}
		free(temp);
		fclose(fp);
	} else { /* other processes receive their block rows from process 0 */
		for (mat_row = 0; mat_row < Order(local_A); mat_row++)
			MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A),
				FLOAT_MPI, 0, 0, grid->comm, &status);
	}
} /* Read_matrix */
/*********************************************************/
/* Read and distribute matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* read a block of n_bar floats on process 0
* and send them to the appropriate process.
*/
/* Read matrix B from "B.dat" on process 0 and distribute it.  The wire
 * format is plain rows, but every rank stores its block of B TRANSPOSED
 * (logical B[r][c] lives at Entry(local_B, c, r)) so the later multiply
 * C += A * B^T can stream both operands contiguously in memory.
 * Process 0 scans its own block (only possible for grid row 0, so
 * mat_row is then a valid local index) directly into the transposed
 * layout; other ranks receive plain rows and transpose while unpacking.
 * NOTE(review): "%lf" assumes FLOAT is double -- confirm the typedef.
 * Fixes vs. original: fopen/malloc/fscanf results were unchecked. */
void Read_matrix_B(
        char* prompt /* in */,
        LOCAL_MATRIX_T* local_B /* out */,
        GRID_INFO_T* grid /* in */,
        int n /* in */) {
    FILE *fp;
    int mat_row, mat_col;
    int grid_row, grid_col;
    int dest;
    int coords[2];
    FLOAT *temp;
    MPI_Status status;

    if (grid->my_rank == 0) { /* process 0 reads the file and distributes */
        fp = fopen("B.dat", "r");
        if (fp == NULL) { /* was unchecked: NULL deref on missing file */
            fprintf(stderr, "Read_matrix_B: cannot open B.dat\n");
            MPI_Abort(grid->comm, 1);
        }
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        if (temp == NULL) { /* was unchecked malloc */
            fprintf(stderr, "Read_matrix_B: out of memory\n");
            MPI_Abort(grid->comm, 1);
        }
        printf("%s\n", prompt);
        fflush(stdout);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_B); /* grid row owning this row */
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &dest);
                if (dest == 0) {
                    /* Local block: scan directly into transposed storage
                     * (row/column indices swapped in the entries offset). */
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        if (fscanf(fp, "%lf",
                                (local_B->entries)+mat_col*Order(local_B)+mat_row) != 1) {
                            fprintf(stderr, "Read_matrix_B: bad input in B.dat\n");
                            MPI_Abort(grid->comm, 1);
                        }
                } else {
                    /* Remote block: send the row untransposed; the
                     * receiver transposes while unpacking. */
                    for (mat_col = 0; mat_col < Order(local_B); mat_col++)
                        if (fscanf(fp, "%lf", temp + mat_col) != 1) {
                            fprintf(stderr, "Read_matrix_B: bad input in B.dat\n");
                            MPI_Abort(grid->comm, 1);
                        }
                    MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0,
                            grid->comm);
                }
            }
        }
        free(temp);
        fclose(fp);
    } else {
        /* Receive each logical row and scatter it into a column of the
         * transposed local storage. */
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        if (temp == NULL) { /* was unchecked malloc */
            fprintf(stderr, "Read_matrix_B: out of memory\n");
            MPI_Abort(grid->comm, 1);
        }
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            MPI_Recv(temp, Order(local_B),
                    FLOAT_MPI, 0, 0, grid->comm, &status);
            for (mat_row = 0; mat_row < Order(local_B); mat_row++)
                Entry(local_B, mat_row, mat_col) = *(temp + mat_row);
        }
        free(temp);
    }
} /* Read_matrix_B */
/*********************************************************/
/* Recive and Print Matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
/* Gather matrix A on process 0 and print it in global row order.
 * Each non-root rank sends its local rows one message at a time;
 * rank 0 interleaves its own blocks with received ones so the output
 * appears as the full n x n matrix. */
void Print_matrix_A(
        char* title /* in */,
        LOCAL_MATRIX_T* local_A /* out */,
        GRID_INFO_T* grid /* in */,
        int n /* in */) {
    int row, col;
    int gcol;
    int src;
    int pos[2];
    FLOAT* buf;
    MPI_Status status;

    if (grid->my_rank != 0) {
        /* Non-root: ship each local row to rank 0 in order. */
        for (row = 0; row < Order(local_A); row++)
            MPI_Send(&Entry(local_A, row, 0), Order(local_A),
                    FLOAT_MPI, 0, 0, grid->comm);
        return;
    }

    buf = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT));
    printf("%s\n", title);
    for (row = 0; row < n; row++) {
        pos[0] = row/Order(local_A); /* grid row owning this global row */
        for (gcol = 0; gcol < grid->q; gcol++) {
            pos[1] = gcol;
            MPI_Cart_rank(grid->comm, pos, &src);
            if (src == 0) {
                for (col = 0; col < Order(local_A); col++)
                    printf("%20.15E ", Entry(local_A, row, col));
            } else {
                MPI_Recv(buf, Order(local_A), FLOAT_MPI, src, 0,
                        grid->comm, &status);
                for (col = 0; col < Order(local_A); col++)
                    printf("%20.15E ", buf[col]);
            }
        }
        printf("\n");
    }
    free(buf);
} /* Print_matrix_A */
/*********************************************************/
/* Recive and Print Matrix for local matrix B's transpose:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
/* Gather B and print it as the logical (untransposed) n x n matrix.
 * local_B is stored transposed (column-major relative to the logical
 * matrix), so rank 0 prints Entry(local_B, col, row); each non-root
 * rank first copies a logical row out of the transposed storage into
 * temp, so the wire format is plain rows, matching what rank 0 prints
 * for received blocks. */
void Print_matrix_B(
        char* title /* in */,
        LOCAL_MATRIX_T* local_B /* out */,
        GRID_INFO_T* grid /* in */,
        int n /* in */) {
    int mat_row, mat_col;
    int grid_row, grid_col;
    int source;
    int coords[2];
    FLOAT* temp;
    MPI_Status status;
    if (grid->my_rank == 0) {
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_B); /* grid row owning this row */
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    /* Local block: indices swapped because B is stored
                     * transposed (column-major storage of the logical B). */
                    for(mat_col = 0; mat_col < Order(local_B); mat_col++)
                        printf("%20.15E ", Entry(local_B, mat_col, mat_row));
                } else {
                    /* Remote blocks arrive as plain logical rows. */
                    MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0,
                        grid->comm, &status);
                    for(mat_col = 0; mat_col < Order(local_B); mat_col++)
                        printf("%20.15E ", temp[mat_col]);
                }
            }
            printf("\n");
        }
        free(temp);
    } else { /* other processes send their blocks to process 0 */
        temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT));
        for (mat_col = 0; mat_col < Order(local_B); mat_col++) {
            /* Gather one logical row (a column of the transposed
             * storage) into temp before sending. */
            for(mat_row = 0; mat_row < Order(local_B); mat_row++)
                *(temp+mat_row) = Entry(local_B, mat_row, mat_col);
            MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm);
        }
        free(temp);
    }
} /* Print_matrix_B */
/*********************************************************/
/* Recive and Print Matrix A:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
/* Gather matrix C on process 0 and print it in global row order.
 * Mirrors Print_matrix_A: rank 0 interleaves its own row blocks with
 * blocks received from the rank owning each (grid_row, grid_col). */
void Print_matrix_C(
        char* title /* in */,
        LOCAL_MATRIX_T* local_C /* out */,
        GRID_INFO_T* grid /* in */,
        int n /* in */) {
    int row, col;
    int gcol;
    int src;
    int pos[2];
    FLOAT* buf;
    MPI_Status status;

    if (grid->my_rank != 0) {
        /* Non-root: send each local row to rank 0 in order. */
        for (row = 0; row < Order(local_C); row++)
            MPI_Send(&Entry(local_C, row, 0), Order(local_C),
                    FLOAT_MPI, 0, 0, grid->comm);
        return;
    }

    buf = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
    printf("%s\n", title);
    for (row = 0; row < n; row++) {
        pos[0] = row/Order(local_C); /* grid row owning this global row */
        for (gcol = 0; gcol < grid->q; gcol++) {
            pos[1] = gcol;
            MPI_Cart_rank(grid->comm, pos, &src);
            if (src == 0) {
                for (col = 0; col < Order(local_C); col++)
                    printf("%20.15E ", Entry(local_C, row, col));
            } else {
                MPI_Recv(buf, Order(local_C), FLOAT_MPI, src, 0,
                        grid->comm, &status);
                for (col = 0; col < Order(local_C); col++)
                    printf("%20.15E ", buf[col]);
            }
        }
        printf("\n");
    }
    free(buf);
} /* Print_matrix_C */
/*********************************************************/
/* Recive and Write Matrix C into a file:
* foreach global row of the matrix,
* foreach grid column
* send n_bar floats to process 0 from each other process
* receive a block of n_bar floats on process 0 from other processes and print them
*/
/* Gather matrix C on process 0 and write it to "C.dat" in global row
 * order; non-root ranks send their local rows one message per row.
 * Fixes vs. original: fopen and malloc results were unchecked (NULL
 * dereference if C.dat cannot be created). */
void Write_matrix_C(
        char* title /* in */,
        LOCAL_MATRIX_T* local_C /* out */,
        GRID_INFO_T* grid /* in */,
        int n /* in */) {
    FILE *fp;
    int mat_row, mat_col;
    int grid_row, grid_col;
    int source;
    int coords[2];
    FLOAT* temp;
    MPI_Status status;

    if (grid->my_rank == 0) {
        fp = fopen("C.dat", "w+");
        if (fp == NULL) { /* was unchecked: NULL deref on failure */
            fprintf(stderr, "Write_matrix_C: cannot open C.dat\n");
            MPI_Abort(grid->comm, 1);
        }
        temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT));
        if (temp == NULL) { /* was unchecked malloc */
            fprintf(stderr, "Write_matrix_C: out of memory\n");
            MPI_Abort(grid->comm, 1);
        }
        printf("%s\n", title);
        for (mat_row = 0; mat_row < n; mat_row++) {
            grid_row = mat_row/Order(local_C); /* grid row owning this row */
            coords[0] = grid_row;
            for (grid_col = 0; grid_col < grid->q; grid_col++) {
                coords[1] = grid_col;
                MPI_Cart_rank(grid->comm, coords, &source);
                if (source == 0) {
                    /* Process 0's own block. */
                    for (mat_col = 0; mat_col < Order(local_C); mat_col++)
                        fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col));
                } else {
                    /* One row block from the owning rank. */
                    MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0,
                            grid->comm, &status);
                    for (mat_col = 0; mat_col < Order(local_C); mat_col++)
                        fprintf(fp, "%20.15E ", temp[mat_col]);
                }
            }
            fprintf(fp, "\n");
        }
        free(temp);
        fclose(fp); /* fclose flushes the stream */
    } else {
        /* Non-root: send each local row to rank 0 in order. */
        for (mat_row = 0; mat_row < Order(local_C); mat_row++)
            MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C),
                    FLOAT_MPI, 0, 0, grid->comm);
    }
} /* Write_matrix_C */
/*********************************************************/
/*
* Set local matrix's element to zero
*/
/* Zero every entry of the square local matrix. */
void Set_to_zero(
        LOCAL_MATRIX_T* local_A /* out */) {
    int row, col;
    for (row = 0; row < Order(local_A); row++) {
        for (col = 0; col < Order(local_A); col++) {
            Entry(local_A, row, col) = 0.0E0;
        }
    }
} /* Set_to_zero */
/*********************************************************/
/* Build the derived MPI datatype local_matrix_mpi_t describing one
 * LOCAL_MATRIX_T: the int field n_bar followed by Order*Order matrix
 * entries.  Displacements are computed as byte offsets of each member
 * from the struct base address, so the type works for any instance.
 * Fixes vs. original: MPI_Address and MPI_Type_struct were deprecated
 * in MPI-2 and removed in MPI-3; use MPI_Get_address and
 * MPI_Type_create_struct.  The intermediate contiguous type handle was
 * also leaked; it is now freed after the struct type is committed. */
void Build_matrix_type(
        LOCAL_MATRIX_T* local_A /* in */) {
    MPI_Datatype temp_mpi_t;
    int block_lengths[2];
    MPI_Aint displacements[2];
    MPI_Datatype typelist[2];
    MPI_Aint start_address;
    MPI_Aint address;

    /* One contiguous run of Order*Order matrix entries. */
    MPI_Type_contiguous(Order(local_A)*Order(local_A),
        FLOAT_MPI, &temp_mpi_t);

    block_lengths[0] = block_lengths[1] = 1;
    typelist[0] = MPI_INT;    /* the n_bar field   */
    typelist[1] = temp_mpi_t; /* the entries block */

    /* Byte offsets of each member relative to the struct base. */
    MPI_Get_address(local_A, &start_address);
    MPI_Get_address(&(local_A->n_bar), &address);
    displacements[0] = address - start_address;
    MPI_Get_address(local_A->entries, &address);
    displacements[1] = address - start_address;

    MPI_Type_create_struct(2, block_lengths, displacements,
        typelist, &local_matrix_mpi_t);
    MPI_Type_commit(&local_matrix_mpi_t);

    /* The intermediate type is no longer needed once the struct type
     * is committed; freeing it does not affect the derived type. */
    MPI_Type_free(&temp_mpi_t);
} /* Build_matrix_type */
/*********************************************************/
/* local matrix multiplication function
* withing OpenMP Thread Acceleration
*/
/* Local block multiply: C += A * B^T, OpenMP-parallel over rows of C.
 * local_B is stored transposed (column-major storage of the logical B),
 * so logical B(k,j) is read as Entry(local_B, j, k) and both operand
 * accesses stream memory contiguously in k.  Each thread writes
 * distinct rows i of C, so no synchronization is needed.
 * Improvement vs. original: accumulate into a local variable instead of
 * re-reading and re-writing Entry(local_C,i,j) on every k iteration. */
void Local_matrix_multiply(
        LOCAL_MATRIX_T* local_A /* in */,
        LOCAL_MATRIX_T* local_B /* in */,
        LOCAL_MATRIX_T* local_C /* out */) {
    int i, j, k;
#pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS)
    for (i = 0; i < Order(local_A); i++) {
        for (j = 0; j < Order(local_A); j++) {
            /* Register accumulator; written back once per (i,j). */
            FLOAT sum = Entry(local_C, i, j);
            for (k = 0; k < Order(local_B); k++)
                sum += Entry(local_A, i, k) * Entry(local_B, j, k);
            Entry(local_C, i, j) = sum;
        }
    }
} /* Local_matrix_multiply */
/*********************************************************/
/* Recive and Print Local Matrix A:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Print every rank's local block of A on process 0, one rank at a time.
 * Non-root ranks send their whole LOCAL_MATRIX_T in a single message
 * using the derived datatype local_matrix_mpi_t (declared elsewhere in
 * this file; built by Build_matrix_type); rank 0 receives into the
 * scratch matrix temp_mat (also declared elsewhere in this file) and
 * prints each block tagged with the sender's grid coordinates. */
void Print_local_matrices_A(
        char* title /* in */,
        LOCAL_MATRIX_T* local_A /* in */,
        GRID_INFO_T* grid /* in */) {
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    /* All output is funneled through process 0 of the process mesh. */
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_A); i++) {
            for (j = 0; j < Order(local_A); j++)
                printf("%20.15E ", Entry(local_A,i,j));
            printf("\n");
        }
        /* Receive and print every remote block in rank order. */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,i,j));
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        /* Send the whole local matrix (n_bar + entries) in one message. */
        MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Print_local_matrices_A */
/*********************************************************/
/* Recive and Print Local Matrix for local matrix B's transpose:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Print every rank's local block of B on process 0, one rank at a time.
 * local_B is stored transposed (column-major storage of the logical B),
 * so the print loops use Entry(..., j, i) to display the logical
 * (untransposed) block.  Blocks are shipped whole via the derived
 * datatype local_matrix_mpi_t into the scratch matrix temp_mat (both
 * declared elsewhere in this file). */
void Print_local_matrices_B(
        char* title /* in */,
        LOCAL_MATRIX_T* local_B /* in */,
        GRID_INFO_T* grid /* in */) {
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    /* All output is funneled through process 0 of the process mesh. */
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_B); i++) {
            /* Indices swapped: undo the transposed storage for display. */
            for (j = 0; j < Order(local_B); j++)
                printf("%20.15E ", Entry(local_B,j,i));
            printf("\n");
        }
        /* Receive and print every remote block in rank order. */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                /* Received block is also stored transposed; swap indices. */
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,j,i));
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        /* Send the whole local matrix (n_bar + entries) in one message. */
        MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Print_local_matrices_B */
/*********************************************************/
/* Recive and Print Local Matrix A:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Print every rank's local block of C on process 0, one rank at a time.
 * Same gather pattern as Print_local_matrices_A: whole LOCAL_MATRIX_T
 * messages via the derived datatype local_matrix_mpi_t into the scratch
 * matrix temp_mat (both declared elsewhere in this file). */
void Print_local_matrices_C(
        char* title /* in */,
        LOCAL_MATRIX_T* local_C /* in */,
        GRID_INFO_T* grid /* in */) {
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    /* All output is funneled through process 0 of the process mesh. */
    if (grid->my_rank == 0) {
        printf("%s\n", title);
        printf("Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_C); i++) {
            for (j = 0; j < Order(local_C); j++)
                printf("%20.15E ", Entry(local_C,i,j));
            printf("\n");
        }
        /* Receive and print every remote block in rank order. */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            printf("Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    printf("%20.15E ", Entry(temp_mat,i,j));
                printf("\n");
            }
        }
        fflush(stdout);
    } else {
        /* Send the whole local matrix (n_bar + entries) in one message. */
        MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Print_local_matrices_C */
/*********************************************************/
/* Recive and Write Local Matrix A:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Write every rank's local block of A to "local_A.dat" via process 0.
 * Same gather pattern as Print_local_matrices_A (whole-struct messages
 * through local_matrix_mpi_t into the file-scope temp_mat), but output
 * goes to a file instead of stdout.
 * Fix vs. original: fopen result was unchecked (NULL dereference). */
void Write_local_matrices_A(
        char* title /* in */,
        LOCAL_MATRIX_T* local_A /* in */,
        GRID_INFO_T* grid /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    /* All output is funneled through process 0 of the process mesh. */
    if (grid->my_rank == 0) {
        fp = fopen("local_A.dat","w+");
        if (fp == NULL) { /* was unchecked: NULL deref on failure */
            fprintf(stderr, "Write_local_matrices_A: cannot open local_A.dat\n");
            MPI_Abort(grid->comm, 1);
        }
        printf("%s\n", title);
        fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_A); i++) {
            for (j = 0; j < Order(local_A); j++)
                fprintf(fp,"%20.15E ", Entry(local_A,i,j));
            fprintf(fp, "\n");
        }
        /* Receive and write every remote block in rank order. */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
                fprintf(fp, "\n");
            }
        }
        fflush(stdout);
        fclose(fp); /* fclose flushes fp's buffer */
    } else {
        MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Write_local_matrices_A */
/*********************************************************/
/* Recive and Write Local Matrix for local matrix B's transpose:
* Process 0 print local matrix local_A
* Other Processess send local matrix local_A to process 0
* And process 0 receive local matrix local_A from other processess
*/
/* Write every rank's local block of B to "local_B.dat" via process 0.
 * local_B is stored transposed, so the write loops use Entry(..., j, i)
 * to emit the logical (untransposed) block.
 * Fix vs. original: fopen result was unchecked (NULL dereference). */
void Write_local_matrices_B(
        char* title /* in */,
        LOCAL_MATRIX_T* local_B /* in */,
        GRID_INFO_T* grid /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    /* All output is funneled through process 0 of the process mesh. */
    if (grid->my_rank == 0) {
        fp = fopen("local_B.dat","w+");
        if (fp == NULL) { /* was unchecked: NULL deref on failure */
            fprintf(stderr, "Write_local_matrices_B: cannot open local_B.dat\n");
            MPI_Abort(grid->comm, 1);
        }
        printf("%s\n", title);
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_B); i++) {
            /* Indices swapped: undo the transposed (column-major) storage. */
            for (j = 0; j < Order(local_B); j++)
                fprintf(fp, "%20.15E ", Entry(local_B,j,i));
            fprintf(fp, "\n");
        }
        /* Receive and write every remote block in rank order. */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                /* Received block is also stored transposed; swap indices. */
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,j,i));
                fprintf(fp, "\n");
            }
        }
        fflush(stdout);
        fclose(fp); /* fclose flushes fp's buffer */
    } else {
        MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Write_local_matrices_B */
/*********************************************************/
/* Recive and Write Local Matrix C:
* Process 0 print local matrix local_C
* Other Processess send local matrix local_C to process 0
* And process 0 receive local matrix local_C from other processess
*/
/* Write every rank's local block of C to "local_C.dat" via process 0.
 * Same gather pattern as Write_local_matrices_A.
 * Fix vs. original: fopen result was unchecked (NULL dereference). */
void Write_local_matrices_C(
        char* title /* in */,
        LOCAL_MATRIX_T* local_C /* in */,
        GRID_INFO_T* grid /* in */) {
    FILE *fp;
    int coords[2];
    int i, j;
    int source;
    MPI_Status status;
    /* All output is funneled through process 0 of the process mesh. */
    if (grid->my_rank == 0) {
        fp = fopen("local_C.dat","w+");
        if (fp == NULL) { /* was unchecked: NULL deref on failure */
            fprintf(stderr, "Write_local_matrices_C: cannot open local_C.dat\n");
            MPI_Abort(grid->comm, 1);
        }
        printf("%s\n", title);
        fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
            grid->my_rank, grid->my_row, grid->my_col);
        for (i = 0; i < Order(local_C); i++) {
            for (j = 0; j < Order(local_C); j++)
                fprintf(fp, "%20.15E ", Entry(local_C,i,j));
            fprintf(fp, "\n");
        }
        /* Receive and write every remote block in rank order. */
        for (source = 1; source < grid->p; source++) {
            MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0,
                grid->comm, &status);
            MPI_Cart_coords(grid->comm, source, 2, coords);
            fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n",
                source, coords[0], coords[1]);
            for (i = 0; i < Order(temp_mat); i++) {
                for (j = 0; j < Order(temp_mat); j++)
                    fprintf(fp, "%20.15E ", Entry(temp_mat,i,j));
                fprintf(fp, "\n");
            }
        }
        fflush(stdout);
        fclose(fp); /* fclose flushes fp's buffer */
    } else {
        MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm);
    }
} /* Write_local_matrices_C */
|
FFVMC.h | #ifndef FFVMC3D_H
#define FFVMC3D_H
#include <vector>
#include <fstream>
#include <sstream>
#include <iostream>
#include <sys/stat.h>
#include "BCMTools.h"
#include "BlockManager.h"
#include "Scalar3D.h"
#include "BCMOctree.h"
#include "Partition.h"
// One iso-surface vertex produced by the marching-cubes pass:
// a 3-D position plus the scalar value carried with the point
// (set to the contour threshold when triangles are generated).
typedef struct _Vertex {
	double x;
	double y;
	double z;
	double value;
}Vertex;
extern const int edgeTable[256];
extern const int triTable[256][16];
extern Vertex GetIntersection(Vertex v1, Vertex v2, double p1, double p2, double threshold);
// Marching-cubes iso-surface extraction and VTK PolyData output for
// BCM block-structured data.  For each local block the cell-centered
// scalar field is averaged to the block's node points, triangles of the
// iso-surface (value == threshold) are generated with the classic
// edgeTable/triTable lookup, and one .vtp piece per block is written;
// rank 0 additionally writes the .pvtp index referencing every piece.
// Fixes vs. original: the destructor now releases pPointData (it was
// allocated with new[] in InitPointData and leaked); dead statements
// and unused locals removed; size_t loop indices in PrintVTP.
class FFVMC {
public:
	FFVMC()
	: blockManager(BlockManager::getInstance()),
	  comm(blockManager.getCommunicator()) {
		PNX = 0;
		PNY = 0;
		PNZ = 0;
		pPointData = 0;
		vVertexList.clear();
	}

	virtual ~FFVMC() {
		// Bug fix: pPointData was previously leaked.
		delete [] pPointData;
		pPointData = 0;
	}

private:
	BlockManager& blockManager;
	const MPI::Intracomm& comm;

public:
	// Write the iso-surface of data class dataClassID at `threshold`.
	// Every rank writes one .vtp per local block under <path>/<step>/;
	// rank 0 then writes <path>/<prefix><name>-<step>.pvtp referencing
	// all pieces of all ranks.  maxLevel/minLevel/rootGrid/rootOrigin/
	// rootLength are currently unused here but kept for interface
	// compatibility with the other writers.
	template <typename T>
	void writeContour(
			int dataClassID,
			int vc,
			const std::string path,
			const std::string prefix,
			const std::string name,
			int step,
			int maxLevel,
			int minLevel,
			RootGrid* rootGrid,
			BCMOctree* tree,
			Partition* partition,
			Vec3r rootOrigin,
			double rootLength,
			double threshold) {
		// Ensure <path>/ and <path>/<step padded to 10 digits>/ exist.
		std::ostringstream ossFileNameTime;
		ossFileNameTime << path;
		mkdir(ossFileNameTime.str().c_str(), 0755);
		ossFileNameTime << "/";
		ossFileNameTime.width(10);
		ossFileNameTime.setf(std::ios::fixed);
		ossFileNameTime.fill('0');
		ossFileNameTime << step;
		mkdir(ossFileNameTime.str().c_str(), 0755);

		int myrank = comm.Get_rank();

		// One marching-cubes pass and one .vtp piece per local block.
		for (int id = 0; id < blockManager.getNumBlock(); ++id) {
			BlockBase* block = blockManager.getBlock(id);
			Vec3i size = block->getSize();
			Vec3r origin = block->getOrigin();
			Vec3r cellSize = block->getCellSize();
			Scalar3D<T>* s = dynamic_cast<Scalar3D<T>*>(block->getDataClass(dataClassID));
			T* sData = s->getData();
			writeContourLocal(
					sData,
					threshold,
					path,
					prefix,
					name,
					step, myrank, id,
					size.x, size.y, size.z,
					vc,
					origin.x, origin.y, origin.z,
					cellSize.x);
		}

		// Only rank 0 writes the parallel index file.
		if( myrank != 0 ) {
			return;
		}
		std::ostringstream ossFileName;
		ossFileName << path;
		ossFileName << "/";
		ossFileName << prefix;
		ossFileName << name.c_str();
		ossFileName << "-";
		ossFileName.width(10);
		ossFileName.setf(std::ios::fixed);
		ossFileName.fill('0');
		ossFileName << step;
		ossFileName << ".pvtp";
		std::ofstream ofs;
		ofs.open(ossFileName.str().c_str(), std::ios::out);
		ofs << "<VTKFile type=\"PPolyData\" version=\"0.1\">" << std::endl;
		ofs << "<PPolyData GhostLevel=\"0\">" << std::endl;
		ofs << "<PPointData>" << std::endl;
		ofs << "<PDataArray type=\"Float32\" Name=\"";
		ofs << name;
		ofs << "\" format=\"ascii\"/>" << std::endl;
		ofs << "</PPointData>" << std::endl;
		ofs << "<PCellData>" << std::endl;
		ofs << "</PCellData>" << std::endl;
		ofs << "<PPoints>" << std::endl;
		ofs << "<PDataArray type=\"Float32\" Name=\"Points\" NumberOfComponents=\"3\" format=\"ascii\"/>" << std::endl;
		ofs << "</PPoints>" << std::endl;
		// Reference every rank's pieces:
		// ./<step>/<prefix><name>-<rank 5d>-<local block 5d>-<step 10d>.vtp
		for (int iRank = 0; iRank < comm.Get_size(); iRank++) {
			for (int id = partition->getStart(iRank); id < partition->getEnd(iRank); id++) {
				std::ostringstream ossFileName2;
				ossFileName2 << "./";
				ossFileName2.width(10);
				ossFileName2.setf(std::ios::fixed);
				ossFileName2.fill('0');
				ossFileName2 << step;
				ossFileName2 << "/";
				ossFileName2 << prefix;
				ossFileName2 << name.c_str();
				ossFileName2 << "-";
				ossFileName2.width(5);
				ossFileName2.setf(std::ios::fixed);
				ossFileName2.fill('0');
				ossFileName2 << iRank;
				ossFileName2 << "-";
				ossFileName2.width(5);
				ossFileName2.setf(std::ios::fixed);
				ossFileName2.fill('0');
				ossFileName2 << id - partition->getStart(iRank);
				ossFileName2 << "-";
				ossFileName2.width(10);
				ossFileName2.setf(std::ios::fixed);
				ossFileName2.fill('0');
				ossFileName2 << step;
				ossFileName2 << ".vtp";
				ofs << "<Piece Source=\"";
				ofs << ossFileName2.str();
				ofs << "\"/>" << std::endl;
			}
		}
		ofs << "</PPolyData>" << std::endl;
		ofs << "</VTKFile>" << std::endl;
		ofs.close();
	}

	// Run the full pipeline for one block: interpolate cell data of size
	// (NX,NY,NZ) with NV ghost layers to node points, extract triangles
	// at `threshold`, and write them as a .vtp piece.  (ox,oy,oz) is the
	// block origin and dx the (cubic) cell spacing.
	template <typename T>
	void writeContourLocal(
			T* pData,
			double threshold,
			const std::string path,
			const std::string prefix,
			const std::string name,
			int step, int rank, int block,
			int NX, int NY, int NZ,
			int NV,
			double ox, double oy, double oz,
			double dx)
	{
		InitPointData(pData, NX, NY, NZ, NV);
		ClearTriangles();
		DetectTriangles(threshold, ox, oy, oz, dx);
		PrintVTP(path, prefix, name, step, rank, block, threshold);
	}

	// Average the cell-centered field (with NV ghost layers per side) to
	// the (NX+1)*(NY+1)*(NZ+1) node points.  The node buffer is reused
	// across blocks and only reallocated when the block size changes.
	template <typename T>
	void InitPointData(
			T* pData,
			int NX, int NY, int NZ,
			int NV)
	{
		if( NX + 1 == PNX &&
				NY + 1 == PNY &&
				NZ + 1 == PNZ ) {
			// Same dimensions as last call: keep the existing buffer.
		} else {
			PNX = NX + 1;
			PNY = NY + 1;
			PNZ = NZ + 1;
			if( pPointData ) {
				delete [] pPointData;
				pPointData = 0;
			}
			pPointData = new float [PNX*PNY*PNZ];
		}
		// Cell-array strides include the ghost layers.
		int CX = NX + 2*NV;
		int CY = NY + 2*NV;
		int CZ = NZ + 2*NV;
#pragma omp parallel for
		for(int k=0; k<PNZ; k++) {
			for(int j=0; j<PNY; j++) {
				for(int i=0; i<PNX; i++) {
					// The 8 cells sharing node (i,j,k), offset by the
					// ghost width NV.
					int i0 = NV + i;
					int j0 = NV + j;
					int k0 = NV + k;
					int m0 = i0   + CX*(j0     + CY*k0);
					int m1 = i0-1 + CX*(j0     + CY*k0);
					int m2 = i0-1 + CX*((j0-1) + CY*k0);
					int m3 = i0   + CX*((j0-1) + CY*k0);
					int m4 = i0   + CX*(j0     + CY*(k0-1));
					int m5 = i0-1 + CX*(j0     + CY*(k0-1));
					int m6 = i0-1 + CX*((j0-1) + CY*(k0-1));
					int m7 = i0   + CX*((j0-1) + CY*(k0-1));
					T phi0 = pData[m0];
					T phi1 = pData[m1];
					T phi2 = pData[m2];
					T phi3 = pData[m3];
					T phi4 = pData[m4];
					T phi5 = pData[m5];
					T phi6 = pData[m6];
					T phi7 = pData[m7];
					int mp = i + PNX*(j + PNY*k);
					pPointData[mp] = 0.125*(phi0 + phi1 + phi2 + phi3 + phi4 + phi5 + phi6 + phi7);
				}
			}
		}
	}

	// Discard the triangle soup from the previous block.
	void ClearTriangles() {
		vVertexList.clear();
	}

	// Classic marching cubes over the node grid: classify each cell
	// against `threshold`, interpolate edge intersections, and append
	// the resulting triangles (3 vertices each) to vVertexList.
	// Vertex order within the list depends on OpenMP scheduling.
	void DetectTriangles(
			double threshold,
			double ox, double oy, double oz,
			double dx)
	{
#pragma omp parallel for
		for(int k=0; k<PNZ-1; k++) {
			for(int j=0; j<PNY-1; j++) {
				for(int i=0; i<PNX-1; i++) {
					// Cube corners, numbered to match edgeTable/triTable.
					Vertex v[8];
					v[3].x = ox + dx*i;
					v[3].y = oy + dx*j;
					v[3].z = oz + dx*k;
					v[2].x = ox + dx*(i+1);
					v[2].y = oy + dx*j;
					v[2].z = oz + dx*k;
					v[1].x = ox + dx*(i+1);
					v[1].y = oy + dx*(j+1);
					v[1].z = oz + dx*k;
					v[0].x = ox + dx*i;
					v[0].y = oy + dx*(j+1);
					v[0].z = oz + dx*k;
					v[7].x = ox + dx*i;
					v[7].y = oy + dx*j;
					v[7].z = oz + dx*(k+1);
					v[6].x = ox + dx*(i+1);
					v[6].y = oy + dx*j;
					v[6].z = oz + dx*(k+1);
					v[5].x = ox + dx*(i+1);
					v[5].y = oy + dx*(j+1);
					v[5].z = oz + dx*(k+1);
					v[4].x = ox + dx*i;
					v[4].y = oy + dx*(j+1);
					v[4].z = oz + dx*(k+1);
					// Scalar value at each corner.
					double p[8];
					p[3] = pPointData[i   + PNX*(j   + PNY*k)];
					p[2] = pPointData[i+1 + PNX*(j   + PNY*k)];
					p[1] = pPointData[i+1 + PNX*(j+1 + PNY*k)];
					p[0] = pPointData[i   + PNX*(j+1 + PNY*k)];
					p[7] = pPointData[i   + PNX*(j   + PNY*(k+1))];
					p[6] = pPointData[i+1 + PNX*(j   + PNY*(k+1))];
					p[5] = pPointData[i+1 + PNX*(j+1 + PNY*(k+1))];
					p[4] = pPointData[i   + PNX*(j+1 + PNY*(k+1))];
					// One bit per corner below the threshold.
					int cubeindex = 0;
					if( p[0] < threshold ) {
						cubeindex |= 1;
					}
					if( p[1] < threshold ) {
						cubeindex |= 2;
					}
					if( p[2] < threshold ) {
						cubeindex |= 4;
					}
					if( p[3] < threshold ) {
						cubeindex |= 8;
					}
					if( p[4] < threshold ) {
						cubeindex |= 16;
					}
					if( p[5] < threshold ) {
						cubeindex |= 32;
					}
					if( p[6] < threshold ) {
						cubeindex |= 64;
					}
					if( p[7] < threshold ) {
						cubeindex |= 128;
					}
					// Interpolate only the edges the lookup table marks as
					// crossed; triTable never references an unset edge.
					Vertex u[12];
					if( edgeTable[cubeindex] & 1 ) {
						u[0] = GetIntersection(v[0], v[1], p[0], p[1], threshold);
					}
					if( edgeTable[cubeindex] & 2 ) {
						u[1] = GetIntersection(v[1], v[2], p[1], p[2], threshold);
					}
					if( edgeTable[cubeindex] & 4 ) {
						u[2] = GetIntersection(v[2], v[3], p[2], p[3], threshold);
					}
					if( edgeTable[cubeindex] & 8 ) {
						u[3] = GetIntersection(v[3], v[0], p[3], p[0], threshold);
					}
					if( edgeTable[cubeindex] & 16 ) {
						u[4] = GetIntersection(v[4], v[5], p[4], p[5], threshold);
					}
					if( edgeTable[cubeindex] & 32 ) {
						u[5] = GetIntersection(v[5], v[6], p[5], p[6], threshold);
					}
					if( edgeTable[cubeindex] & 64 ) {
						u[6] = GetIntersection(v[6], v[7], p[6], p[7], threshold);
					}
					if( edgeTable[cubeindex] & 128 ) {
						u[7] = GetIntersection(v[7], v[4], p[7], p[4], threshold);
					}
					if( edgeTable[cubeindex] & 256 ) {
						u[8] = GetIntersection(v[0], v[4], p[0], p[4], threshold);
					}
					if( edgeTable[cubeindex] & 512 ) {
						u[9] = GetIntersection(v[1], v[5], p[1], p[5], threshold);
					}
					if( edgeTable[cubeindex] & 1024 ) {
						u[10] = GetIntersection(v[2], v[6], p[2], p[6], threshold);
					}
					if( edgeTable[cubeindex] & 2048 ) {
						u[11] = GetIntersection(v[3], v[7], p[3], p[7], threshold);
					}
					// Iso-surface points all carry the threshold value.
					for(int n=0; n<12; n++) {
						u[n].value = threshold;
					}
					// vVertexList is shared across threads; serialize appends.
#pragma omp critical
					{
						for(int t=0; triTable[cubeindex][t] != -1; t+=3) {
							vVertexList.push_back( u[triTable[cubeindex][t]] );
							vVertexList.push_back( u[triTable[cubeindex][t+1]] );
							vVertexList.push_back( u[triTable[cubeindex][t+2]] );
						}
					}
				}
			}
		}
	}

	// Serialize the current triangle soup as an ASCII VTK PolyData piece:
	// <path>/<step 10d>/<prefix><name>-<rank 5d>-<block 5d>-<step 10d>.vtp
	void PrintVTP(
			const std::string path,
			const std::string prefix,
			const std::string name,
			int step, int rank, int block,
			double threshold)
	{
		std::ostringstream ossFileName2;
		ossFileName2 << path;
		ossFileName2 << "/";
		ossFileName2.width(10);
		ossFileName2.setf(std::ios::fixed);
		ossFileName2.fill('0');
		ossFileName2 << step;
		ossFileName2 << "/";
		ossFileName2 << prefix;
		ossFileName2 << name.c_str();
		ossFileName2 << "-";
		ossFileName2.width(5);
		ossFileName2.setf(std::ios::fixed);
		ossFileName2.fill('0');
		ossFileName2 << rank;
		ossFileName2 << "-";
		ossFileName2.width(5);
		ossFileName2.setf(std::ios::fixed);
		ossFileName2.fill('0');
		ossFileName2 << block;
		ossFileName2 << "-";
		ossFileName2.width(10);
		ossFileName2.setf(std::ios::fixed);
		ossFileName2.fill('0');
		ossFileName2 << step;
		ossFileName2 << ".vtp";
		size_t nPoints = vVertexList.size();       // 3 points per triangle
		size_t nPolys = vVertexList.size()/3;
		int nLines = 0;
		std::ofstream ofs;
		ofs.open(ossFileName2.str().c_str(), std::ios::out);
		ofs << "<VTKFile type=\"PolyData\" version=\"0.1\" byte_order=\"";
#ifdef __FUJITSU
		ofs << "BigEndian";
#else
		ofs << "LittleEndian";
#endif
		ofs << "\">" << std::endl;
		ofs << "<PolyData>" << std::endl;
		ofs << "<Piece NumberOfPoints=\"";
		ofs << nPoints;
		ofs << "\" NumberOfVerts=\"0\" NumberOfLines=\"";
		ofs << nLines;
		ofs << "\" NumberOfStrips=\"0\" NumberOfPolys=\"";
		ofs << nPolys;
		ofs << "\">" << std::endl;
		ofs << "<Points>" << std::endl;
		ofs << "<DataArray type=\"Float32\" Name=\"Points\" NumberOfComponents=\"3\" format=\"ascii\">" << std::endl;
		for(size_t i=0; i<vVertexList.size(); i++) {
			ofs << vVertexList[i].x << " " << vVertexList[i].y << " " << vVertexList[i].z << std::endl;
		}
		ofs << "</DataArray>" << std::endl;
		ofs << "</Points>" << std::endl;
		// Connectivity is trivial: vertices were appended 3 per triangle.
		ofs << "<Polys>" << std::endl;
		ofs << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">" << std::endl;
		for(size_t i=0; i<vVertexList.size()/3; i++) {
			ofs << 3*i << " " << 3*i+1 << " " << 3*i+2 << std::endl;
		}
		ofs << "</DataArray>" << std::endl;
		ofs << "<DataArray type=\"Int32\" Name=\"offsets\" format=\"ascii\">" << std::endl;
		for(size_t i=0; i<vVertexList.size()/3; i++) {
			ofs << 3*(i+1) << std::endl;
		}
		ofs << "</DataArray>" << std::endl;
		ofs << "</Polys>" << std::endl;
		ofs << "<Lines>" << std::endl;
		ofs << "</Lines>" << std::endl;
		ofs << "<Verts>" << std::endl;
		ofs << "</Verts>" << std::endl;
		ofs << "<Strips>" << std::endl;
		ofs << "</Strips>" << std::endl;
		ofs << "<PointData>" << std::endl;
		ofs << "<DataArray type=\"Float32\" Name=\"";
		ofs << name;
		ofs << "\" format=\"ascii\">" << std::endl;
		for(size_t i=0; i<vVertexList.size(); i++) {
			ofs << vVertexList[i].value << std::endl;
		}
		ofs << "</DataArray>" << std::endl;
		ofs << "</PointData>" << std::endl;
		ofs << "<CellData>" << std::endl;
		ofs << "</CellData>" << std::endl;
		ofs << "</Piece>" << std::endl;
		ofs << "</PolyData>" << std::endl;
		ofs << "</VTKFile>" << std::endl;
		ofs.close();
	}

private:
	int PNX;            // node counts (cells + 1) of the current buffer
	int PNY;
	int PNZ;
	float *pPointData;  // node-interpolated field, PNX*PNY*PNZ entries
	std::vector<Vertex> vVertexList;  // triangle soup, 3 vertices/triangle
};
#endif
|
exercise5.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise5.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief Exercise 5
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <omp.h>
#include "utils.h"
#if !defined(W)
#define W (1 << 15)
#endif
/* Dummy Tasks */
void task1();
void task2();
void task3();
void task4();
/**
* @brief EX 5 - Task Parallelism w/sections
*
* a) Create a parallel region with 4 threads. Use thread IDs to execute
* different WORK functions on different threads.
* b) Create a parallel region with 4 threads. Achieve the same work partitioning
* as a) using SECTIONS.
*
* @return void
*/
void exercise()
{
#if 0 //5a
    /* Version (a): explicit thread-ID dispatch inside a 4-thread region. */
    #pragma omp parallel num_threads(4)
    {
        if (omp_get_thread_num() == 0)
            task1();
        if (omp_get_thread_num() == 1)
            task2();
        if (omp_get_thread_num() == 2)
            task3();
        if (omp_get_thread_num() == 3)
            task4();
    }
#endif
#if 1 //5b
    /* Version (b): the exercise asks for a 4-thread parallel region; the
       original pragma omitted num_threads(4), so the team size was the
       implementation default rather than the required 4. */
    #pragma omp parallel sections num_threads(4)
    {
        #pragma omp section
        task1();
        #pragma omp section
        task2();
        #pragma omp section
        task3();
        #pragma omp section
        task4();
    }
#endif
}
void task1()
{
    /* omp_get_thread_num() returns int; the original "%hu" conversion is a
       printf format/argument type mismatch (undefined behavior). */
    DEBUG_PRINT("%d: exec task1!\n", omp_get_thread_num());
    work((1 * W));
}
void task2()
{
    /* omp_get_thread_num() returns int; "%d" replaces the mismatched "%hu". */
    DEBUG_PRINT("%d: exec task2!\n", omp_get_thread_num());
    work((2 * W));
}
void task3()
{
    /* omp_get_thread_num() returns int; "%d" replaces the mismatched "%hu". */
    DEBUG_PRINT("%d: exec task3!\n", omp_get_thread_num());
    work((3 * W));
}
void task4()
{
    /* omp_get_thread_num() returns int; "%d" replaces the mismatched "%hu". */
    DEBUG_PRINT("%d: exec task4!\n", omp_get_thread_num());
    work((4 * W));
}
|
MkBase.h | #ifndef MkBase_h
#define MkBase_h
#include "Matrix.h"
#include "PropagationMPlex.h"
namespace mkfit {
//==============================================================================
// MkBase
//==============================================================================
class MkBase
{
public:
// Vectorized track state over NN tracks: covariance (Err), parameters (Par)
// and charge (Chg), each with a slot for the current and propagated state.
MPlexLS Err[2];
MPlexLV Par[2];
MPlexQI Chg;
static constexpr int iC = 0; // current
static constexpr int iP = 1; // propagated
// Read parameter 'par' of track 'itrack' from state slot 'i' (iC or iP).
float getPar(int itrack, int i, int par) const { return Par[i].ConstAt(itrack, par, 0); }
// Squared transverse radius built from parameters 0 and 1 (presumably the
// x and y coordinates -- confirm against the parameter layout in Matrix.h).
float RadiusSqr(int itrack, int i) const { return hipo_sqr(getPar(itrack, i, 0), getPar(itrack, i, 1)); }
//----------------------------------------------------------------------------
MkBase() {}
//----------------------------------------------------------------------------
// Propagate all tracks from the iC slot to the fixed radius r; the result is
// written into the iP slot by propagateHelixToRMPlex.
void PropagateTracksToR(float r, const int N_proc, const PropagationFlags pf)
{
MPlexQF msRad;
#pragma omp simd
for (int n = 0; n < NN; ++n)
{
msRad.At(n, 0, 0) = r;
}
propagateHelixToRMPlex(Err[iC], Par[iC], Chg, msRad,
Err[iP], Par[iP], N_proc, pf);
}
// Propagate each track to the radius of its target hit, computed per track
// from components 0 and 1 of the hit position vector 'par'.
void PropagateTracksToHitR(const MPlexHV& par, const int N_proc, const PropagationFlags pf,
const MPlexQI *noMatEffPtr=nullptr)
{
MPlexQF msRad;
#pragma omp simd
for (int n = 0; n < NN; ++n)
{
msRad.At(n, 0, 0) = std::hypot(par.ConstAt(n, 0, 0), par.ConstAt(n, 1, 0));
}
propagateHelixToRMPlex(Err[iC], Par[iC], Chg, msRad,
Err[iP], Par[iP], N_proc, pf, noMatEffPtr);
}
//----------------------------------------------------------------------------
// Propagate all tracks from the iC slot to the fixed plane z; the result is
// written into the iP slot.
void PropagateTracksToZ(float z, const int N_proc, const PropagationFlags pf)
{
MPlexQF msZ;
#pragma omp simd
for (int n = 0; n < NN; ++n)
{
msZ.At(n, 0, 0) = z;
}
propagateHelixToZMPlex(Err[iC], Par[iC], Chg, msZ,
Err[iP], Par[iP], N_proc, pf);
}
// Propagate each track to the z coordinate of its target hit (component 2
// of the hit position vector 'par').
void PropagateTracksToHitZ(const MPlexHV& par, const int N_proc, const PropagationFlags pf,
const MPlexQI *noMatEffPtr=nullptr)
{
MPlexQF msZ;
#pragma omp simd
for (int n = 0; n < NN; ++n)
{
msZ.At(n, 0, 0) = par.ConstAt(n, 2, 0);
}
propagateHelixToZMPlex(Err[iC], Par[iC], Chg, msZ,
Err[iP], Par[iP], N_proc, pf, noMatEffPtr);
}
// Propagate each track to the z of its point of closest approach to the
// origin, derived per track from the slope tan of parameter 5 (presumably a
// dip/polar angle -- confirm against the parameter layout).
void PropagateTracksToPCAZ(const int N_proc, const PropagationFlags pf)
{
MPlexQF msZ; // PCA z-coordinate
#pragma omp simd
for (int n = 0; n < NN; ++n)
{
const float slope = std::tan(Par[iC].ConstAt(n, 5, 0));
// msZ.At(n, 0, 0) = ( Config::beamspotz0 + slope * ( Config::beamspotr0 - std::hypot(Par[iC].ConstAt(n, 0, 0), Par[iC].ConstAt(n, 1, 0))) + slope * slope * Par[iC].ConstAt(n, 2, 0) ) / ( 1+slope*slope); // PCA w.r.t. z0, r0
msZ.At(n, 0, 0) = (slope * (slope * Par[iC].ConstAt(n, 2, 0) - std::hypot(Par[iC].ConstAt(n, 0, 0), Par[iC].ConstAt(n, 1, 0)))) / (1 + slope * slope); // PCA to origin
}
propagateHelixToZMPlex(Err[iC], Par[iC], Chg, msZ,
Err[iP], Par[iP], N_proc, pf);
}
};
} // end namespace mkfit
#endif
|
header.h | /*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/
#ifndef _HEADER_H_
#define _HEADER_H_
#include "npbparams.h"
#include "../math/nas_math.h"
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* COMMON block: global */
static int grid_points[3]; /* grid_ponts(1:3) */
/* COMMON block: constants */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
/* COMMON block: fields */
static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
/* COMMON block: work_1d */
/* static gives these internal linkage, matching every other global defined in
   this header; without it, every translation unit that includes header.h
   emits its own external definition and linking fails on modern -fno-common
   toolchains. threadprivate is valid on static file-scope variables. */
static double cuf[PROBLEM_SIZE];
static double q[PROBLEM_SIZE];
static double ue[PROBLEM_SIZE][5];
static double buf[PROBLEM_SIZE][5];
#pragma omp threadprivate(cuf, q, ue, buf)
/*
c to improve cache performance, grid dimensions (first two for these
c to arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;
#endif
|
sum.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
enum {
N = 100000
};
/* Wall-clock time in seconds (microsecond resolution) as a double. */
double wtime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
/* Sum v[low..high] by recursive halving of the index range. */
double sum(double *v, int low, int high)
{
    if (low == high)
        return v[low];
    int mid = (low + high) / 2;
    double left = sum(v, low, mid);
    double right = sum(v, mid + 1, high);
    return left + right;
}
/* Serial fallback mirroring the task version's recursive split exactly, so
   the floating-point association order (and thus the result bits) match the
   original pure-task recursion. */
static double sum_serial_range(double *v, int low, int high)
{
    if (low == high)
        return v[low];
    int mid = (low + high) / 2;
    return sum_serial_range(v, low, mid) + sum_serial_range(v, mid + 1, high);
}

/* Task-parallel recursive sum of v[low..high]. The original spawned one task
   per element, so scheduling overhead dwarfed the O(1) addition; below the
   cutoff we recurse serially instead of creating more tasks. */
double sum_omp_tasks(double *v, int low, int high)
{
    enum { TASK_CUTOFF = 1024 }; /* elements below which task creation stops */
    if (high - low + 1 <= TASK_CUTOFF)
        return sum_serial_range(v, low, high);
    double sum_left, sum_right;
    int mid = (low + high) / 2;
#pragma omp task shared(sum_left)
    sum_left = sum_omp_tasks(v, low, mid);
#pragma omp task shared(sum_right)
    sum_right = sum_omp_tasks(v, mid + 1, high);
#pragma omp taskwait
    return sum_left + sum_right;
}
/* Parallel entry point: one thread seeds the recursive task decomposition;
the team executes the generated tasks. The implicit barrier at the end of
the parallel region guarantees s is fully written before it is returned. */
double sum_omp(double *v, int low, int high)
{
double s = 0;
#pragma omp parallel
{
/* single: only one thread starts the recursion; nowait lets the other
threads proceed straight to task execution. */
#pragma omp single nowait
s = sum_omp_tasks(v, low, high);
}
return s;
}
/* Fill v[i] = i + 1, time the serial recursive sum, print the result and its
   deviation from the closed-form value N*(N+1)/2, and return the elapsed
   wall-clock time in seconds. */
double run_serial()
{
    double *v = malloc(sizeof(*v) * N);
    if (v == NULL)
    {
        /* was unchecked: a failed allocation crashed in the fill loop */
        fprintf(stderr, "run_serial: malloc failed\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++)
        v[i] = i + 1.0;
    double t = wtime();
    double res = sum(v, 0, N - 1);
    t = wtime() - t;
    printf("Result (serial): %.4f; error %.12f\n", res, fabs(res - (1.0 + N) / 2.0 * N));
    free(v);
    return t;
}
/* Fill v[i] = i + 1, time the task-parallel sum, print the result and its
   deviation from the closed-form value N*(N+1)/2, and return the elapsed
   wall-clock time in seconds. */
double run_parallel()
{
    double *v = malloc(sizeof(*v) * N);
    if (v == NULL)
    {
        /* was unchecked: a failed allocation crashed in the fill loop */
        fprintf(stderr, "run_parallel: malloc failed\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N; i++)
        v[i] = i + 1.0;
    double t = wtime();
    double res = sum_omp(v, 0, N - 1);
    t = wtime() - t;
    printf("Result (parallel): %.4f; error %.12f\n", res, fabs(res - (1.0 + N) / 2.0 * N));
    free(v);
    return t;
}
/* Benchmark driver: run both variants and report times and speedup. */
int main(int argc, char **argv)
{
    printf("Recursive summation N = %d\n", N);
    double t_ser = run_serial();
    double t_par = run_parallel();
    printf("Execution time (serial): %.6f\n", t_ser);
    printf("Execution time (parallel): %.6f\n", t_par);
    printf("Speedup: %.2f\n", t_ser / t_par);
    return 0;
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
/* Quotient and remainder of an offset divided by an extent; presumably used
by the virtual-pixel addressing modes -- confirm at the call sites. */
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
ReadPixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *)
magick_hot_spot;
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static volatile MagickBooleanType
instantiate_cache = MagickFalse;
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*restrict cache_info;
char
*synchronize;
/* Allocate and zero the cache descriptor; allocation failure is fatal. */
cache_info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->channels=4;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
/* Raise the nexus count to the maximum of the requested count, the OpenMP
team size, and the thread resource limit, so every worker thread can own a
private nexus. */
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* Honor the MAGICK_SYNCHRONIZE environment toggle; exact semantics live at
the sites that read cache_info->synchronize -- confirm there. */
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
cache_info->semaphore=AllocateSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AllocateSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**restrict nexus_info;
register ssize_t
i;
/* One aligned pointer array plus a single contiguous slab of NexusInfo
structures; nexus_info[i] points into the slab (so only nexus_info and
nexus_info[0] are separate allocations). */
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
sizeof(**nexus_info));
if (nexus_info[0] == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
nexus_info[i]=(&nexus_info[0][i]);
nexus_info[i]->signature=MagickSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
MagickSizeType *length,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
(void) exception;
/* Only memory- and file-mapped caches expose their pixels directly; any
other backing store yields NULL with *length left at 0. */
*length=0;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((const void *) NULL);
*length=cache_info->length;
return((const void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
/* NOTE(review): no lock is taken here -- presumably component genesis runs
single-threaded before any cache use; confirm against the module loader. */
if (cache_semaphore == (SemaphoreInfo *) NULL)
cache_semaphore=AllocateSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
/* Ensure the semaphore exists before locking, clear the instantiate flag
under the lock, then destroy the semaphore itself. */
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
LockSemaphoreInfo(cache_semaphore);
instantiate_cache=MagickFalse;
UnlockSemaphoreInfo(cache_semaphore);
DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**restrict clip_nexus,
**restrict image_nexus;
register const PixelPacket
*restrict r;
register IndexPacket
*restrict nexus_indexes,
*restrict indexes;
register PixelPacket
*restrict p,
*restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Nothing to clip without a mask; PseudoClass images are skipped too. */
if ((image->clip_mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
/* NOTE(review): if exactly one of the two allocations succeeded, the other
is leaked on this early return -- confirm whether that can happen. */
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
/* p: authentic pixels of the clipped region; q: the nexus being written;
r: the clip-mask pixels for the same region. */
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],exception);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
/* Where the mask intensity exceeds mid-range, restore the authentic pixel
into the nexus; elsewhere the nexus keeps its pending values. */
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
if (GetPixelIntensity(image,r) > (QuantumRange/2))
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,GetPixelOpacity(p));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
}
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
/* An early break above means a pixel fetch failed. */
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*restrict clone_info;
const CacheInfo
*restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
/* The clone copies only the thread count and virtual pixel method; pixel
data is transferred separately (ClonePixelCacheRepository). */
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
if (clone_info == (Cache) NULL)
return((Cache) NULL);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*restrict cache_info,
*restrict source_info;
/* Note the confusing local naming: source_info aliases the DESTINATION
('clone'); it receives the method table of 'cache'. */
assert(clone != (Cache) NULL);
source_info=(CacheInfo *) clone;
assert(source_info->signature == MagickSignature);
if (source_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source_info->filename);
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickSignature);
source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Copy number_pixels pixels from source to destination (regions assumed
non-overlapping, as with memcpy). */
static inline void CopyPixels(PixelPacket *destination,
const PixelPacket *source,const MagickSizeType number_pixels)
{
#if !defined(MAGICKCORE_OPENMP_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH <= 8)
(void) memcpy(destination,source,(size_t) number_pixels*sizeof(*source));
#else
{
register MagickOffsetType
i;
/* Small copies: plain memcpy beats OpenMP fork/join overhead. */
if ((number_pixels*sizeof(*source)) < MagickMaxBufferExtent)
{
(void) memcpy(destination,source,(size_t) number_pixels*
sizeof(*source));
return;
}
/* Large copies: split the element-wise copy across threads. */
#pragma omp parallel for
for (i=0; i < (MagickOffsetType) number_pixels; i++)
destination[i]=source[i];
}
#endif
}
/* Return the smaller of two MagickSizeType values. */
static inline MagickSizeType MagickMin(const MagickSizeType x,
const MagickSizeType y)
{
return(x < y ? x : y);
}
/* Copy pixel (and index) data from cache_info into clone_info. A fast bulk
   copy is used when the two caches have identical morphology; otherwise the
   copy proceeds row by row through per-thread nexuses. Returns MagickTrue on
   success. (This revision also repairs four occurrences of the garbled token
   "(R)ion" back to "&region" -- HTML-entity mangling of the address-of
   expression passed to SetPixelCacheNexusPixels.) */
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *restrict clone_info,CacheInfo *restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination,chunk) \
num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \
GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)
MagickBooleanType
status;
NexusInfo
**restrict cache_nexus,
**restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
/* A ping cache carries no pixel data: nothing to clone. */
if (cache_info->type == PingCache)
return(MagickTrue);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
CopyPixels(clone_info->pixels,cache_info->pixels,cache_info->columns*
cache_info->rows);
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes));
return(MagickTrue);
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* Copy only the column overlap of the two caches, one row at a time. */
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
/* Zero the destination row first: columns beyond the source width stay
cleared rather than holding stale data. */
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_threads(cache_info,clone_info,cache_info->rows)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Free the pixel cache attached to this image, if any.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  /*
    Release the pixel-cache memory held by an image, delegating to a
    registered destroy-pixel handler when one is installed.
  */
  CacheInfo
    *restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    image->cache=DestroyPixelCache(image->cache);
  else
    cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  /*
    Close the disk-cache file descriptor (if open) and release its file
    resource.  Returns MagickTrue only when close() succeeds; if no file
    was open, the initial -1 result yields MagickFalse, as before.
  */
  int
    result = (-1);

  if (cache_info->file != -1)
    {
      result=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  if (result == -1)
    return(MagickFalse);
  return(MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the backing store of the pixel cache according to its type:
    heap/anonymous-map memory, a memory-mapped file, a disk file, or a
    distributed cache server.  On return the cache is UndefinedCache.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        {
          /* memory cache was satisfied by an (anonymous) mapping */
          (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
          cache_info->pixels=(PixelPacket *) NULL;
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      /* remove the backing file unless it was opened read-only */
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above — MapCache appears to fall through to
       DiskCache so the backing file descriptor is closed as well; confirm
       this fallthrough is intentional. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* reset bookkeeping regardless of the previous cache type */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
  /*
    Drop one reference to the pixel cache; when the count reaches zero,
    release the pixel store, nexus array, and all ancillary resources.
    Always returns (Cache) NULL.
  */
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Decrement the reference count under the cache semaphore; other
    references remaining means we must not tear anything down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Last reference: free the pixel store and every ancillary structure.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /* poison the signature to catch use-after-free of the struct */
  cache_info->signature=(~MagickSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free a nexus' pixel staging area — unmap it when it was memory-mapped,
    otherwise free the aligned allocation — then reset its bookkeeping.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  /*
    Tear down the per-thread cache nexuses: release each nexus' staged
    pixels and poison its signature, then free the nexus structures
    (allocated as one block anchored at nexus_info[0]) and the pointer
    array itself.  Always returns NULL.
  */
  register ssize_t
    n;

  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    if (nexus_info[n]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickSignature);
  }
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  /*
    Return the authentic black-channel/colormap indexes from the last
    QueueAuthenticPixels()/GetAuthenticPixels() call, delegating to an
    installed handler when one exists.
  */
  CacheInfo
    *restrict cache_info;

  GetAuthenticIndexesFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  handler=cache_info->methods.get_authentic_indexes_from_handler;
  if (handler != (GetAuthenticIndexesFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  PixelPacket
    *restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  /* stage the requested region in the nexus */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* nexus aliases the authentic cache directly — no copy needed */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  /* otherwise read the pixels (and indexes, when active) into the nexus */
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if (cache_info->active_index_channel != MagickFalse)
    if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)
      return((PixelPacket *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%   PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%   PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  /*
    Return the authentic pixels from the last QueueAuthenticPixels() or
    GetAuthenticPixels() call, delegating to an installed handler first.
  */
  CacheInfo
    *restrict cache_info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  handler=cache_info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Obtain a read/write pixel region: route through an installed handler
    when present, otherwise through the calling thread's cache nexus.
  */
  CacheInfo
    *restrict cache_info;

  GetAuthenticPixelsHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  handler=cache_info->methods.get_authentic_pixels_handler;
  if (handler != (GetAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Fetch a writable pixel region through the calling thread's nexus.
  */
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  /*
    Return the extent of the region staged in the calling thread's cache
    nexus by the last queue/get pixel call.
  */
  CacheInfo
    *restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *restrict image)
{
  /*
    Does the image match the pixel cache morphology?  The cache must
    mirror the image's storage class, colorspace, channel count, and
    geometry, and must have its nexus array in place.
  */
  CacheInfo
    *restrict cache_info;

  cache_info=(CacheInfo *) image->cache;
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->channels != cache_info->channels)
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows))
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  /*
    Ensure the image holds a single, writable reference to its pixel
    cache, cloning the cache when it is shared or read-only (copy-on-
    write).  Returns NULL on failure.
  */
  CacheInfo
    *restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* process-wide throttle/expiry state, lazily initialized on first call;
     NOTE(review): updated without atomics — presumably benign races, but
     confirm */
  static MagickSizeType
    cpu_throttle = 0,
    cycles = 0,
    time_limit = 0;

  static time_t
    cache_timestamp = 0;

  status=MagickTrue;
  LockSemaphoreInfo(image->semaphore);
  /* honor the CPU throttle resource: delay every 32nd call */
  if (cpu_throttle == 0)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (time_limit == 0)
    {
      /*
        Set the expire time in seconds.
      */
      time_limit=GetMagickResourceLimit(TimeResource);
      cache_timestamp=time((time_t *) NULL);
    }
  if ((time_limit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared (reference_count > 1) or opened
    read-only, replace it with a private clone before modification.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      /* re-check under the lock (another thread may have cloned already) */
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              /* copy the pixel data only when the caller asked for it */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  if (cache_info->reference_count == 1)
                    cache_info->nexus_info=(NexusInfo **) NULL;
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* drop our reference to the old cache outside its semaphore */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->taint=MagickTrue;
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  /* Synonym for GetImagePixelCacheType(). */
  return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  /*
    Report the type of the image's pixel cache backing store.
  */
  CacheInfo
    *restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Fetch the single authentic pixel at (x,y); *pixel holds the image
    background color whenever the fetch fails.
  */
  CacheInfo
    *restrict cache_info;

  GetOneAuthenticPixelFromHandler
    handler;

  PixelPacket
    *restrict p;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  handler=cache_info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  p=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (p == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Fetch one authentic pixel through the calling thread's nexus; *pixel
    holds the image background color when the fetch fails.
  */
  CacheInfo
    *restrict cache_info;

  PixelPacket
    *restrict p;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  assert(thread_id < (int) cache_info->number_threads);
  p=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[thread_id],exception);
  if (p == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  /*
    Return one virtual pixel at (x,y) as a MagickPixelPacket.  Note the
    packet is initialized via GetMagickPixelPacket() even on the failure
    path, so callers always receive a well-defined *pixel.
  */
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const IndexPacket
    *restrict indexes;

  register const PixelPacket
    *restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  /* fetch via the per-thread nexus using the image's virtual pixel method */
  pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  GetMagickPixelPacket(image,pixel);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
  SetMagickPixelPacket(image,pixels,indexes,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Return one virtual pixel at (x,y) resolved with the given virtual
    pixel method; *pixel holds the image background color on failure.
  */
  CacheInfo
    *restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const PixelPacket
    *restrict p;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Return one virtual pixel at (x,y) using the image's current virtual
    pixel method; *pixel holds the image background color on failure.
  */
  CacheInfo
    *restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const PixelPacket
    *restrict p;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  *pixel=image->background_color;
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict p;

  /*
    Default handler: fetch one virtual pixel at (x,y) through this thread's
    cache nexus; *pixel falls back to the background color on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  *pixel=image->background_color;
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
%      size_t GetPixelCacheChannels(const Cache cache)
%
% A description of each parameter follows:
%
%    o type: GetPixelCacheChannels returns the number of pixel channels.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *restrict info;

  /*
    Report the number of pixel channels associated with this pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *restrict info;

  /*
    Report the colorspace recorded in this pixel cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Zero the structure, then install the default pixel cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
  /* virtual (read-only) pixel access */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* authentic (writable) pixel access */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /* queue / synchronize / teardown */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  MagickSizeType
    extent;

  /*
    Extent (in pixels) of the nexus region; when the nexus region is empty,
    fall back to the extent of the whole cache.
  */
  assert(cache != (Cache) NULL);  /* was `!= NULL`; match file convention */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  assert(nexus_info != (NexusInfo *) NULL);  /* dereferenced below */
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  /*
    Hand back the raw pixel array for in-core caches (memory- or map-backed
    only); *length receives the pixel cache length, or 0 when the cache type
    does not expose its pixels directly.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  (void) exception;  /* asserted above; otherwise unused */
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=cache_info->length;
      return((void *) cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *restrict info;

  /*
    Report the storage class (e.g. DirectClass or PseudoClass) of this cache.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    extent;

  /*
    Suggest a square tile size: 2KB worth of pixels for in-core caches,
    8KB worth for disk-backed caches.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  extent=2048UL/sizeof(PixelPacket);
  if (GetImagePixelCacheType(image) == DiskCache)
    extent=8192UL/sizeof(PixelPacket);
  *width=extent;
  *height=extent;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *restrict info;

  /*
    Report the virtual pixel method currently set on the image's cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Default handler: indexes from this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  /*
    Return the colormap indexes bound to this cache nexus, or NULL when the
    cache has no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->indexes);
  return((IndexPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the indexes from the last virtual/queued pixel request, preferring
    a registered handler over the default per-thread nexus lookup.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    return(cache_info->methods.get_virtual_indexes_from_handler(image));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualIndexesFromNexus(cache_info,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelsFromNexus() method is:
%
% PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table; entries span 0..63 and are recentered by
  subtracting 32 in DitherX()/DitherY().
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  /*
    Perturb x by the dither entry for its column (mod 8), recentered to
    [-32,31], then clamp into [0,columns-1].
  */
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) columns)
      offset=(ssize_t) columns-1L;
  return(offset);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  /*
    Perturb y by the dither entry for its row (mod 8), recentered to
    [-32,31], then clamp into [0,rows-1].
  */
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) rows)
      offset=(ssize_t) rows-1L;
  return(offset);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x into [0,columns-1] (replicate-edge addressing).
  */
  if (x < 0L)
    return(0L);
  if (x < (ssize_t) columns)
    return(x);
  return((ssize_t) (columns-1));
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y into [0,rows-1] (replicate-edge addressing).
  */
  if (y < 0L)
    return(0L);
  if (y < (ssize_t) rows)
    return(y);
  return((ssize_t) (rows-1));
}
/* Pick a random column index for the random virtual-pixel method; assumes
   GetPseudoRandomValue() yields a value in [0,1) so the result lies in
   [0,columns-1] -- TODO confirm against the random_ source. */
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
/* Pick a random row index; same range assumption as RandomX(). */
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
/*
VirtualPixelModulo() computes the remainder of dividing offset by extent. It
returns not only the quotient (tile the offset falls in) but also the positive
  remainder within that tile such that 0 <= remainder < extent. This method is
essentially a ldiv() using a floored modulo division rather than the normal
default truncated modulo division.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division: guarantee 0 <= remainder < extent even for negative
    offsets.  The previous formulation decremented the quotient whenever
    offset < 0, which produced remainder == extent (out of range) when a
    negative offset was an exact multiple of extent; correcting off the
    remainder's sign instead handles that case (C99 `/` and `%` truncate
    toward zero, so a nonzero remainder of a negative offset is negative).
  */
  modulo.quotient=offset/(ssize_t) extent;
  modulo.remainder=offset % (ssize_t) extent;
  if (modulo.remainder < 0)
    {
      modulo.quotient--;
      modulo.remainder+=(ssize_t) extent;
    }
  return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  IndexPacket
    virtual_index;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **restrict virtual_nexus;

  PixelPacket
    *restrict pixels,
    virtual_pixel;

  RectangleInfo
    region;

  register const IndexPacket
    *restrict virtual_indexes;

  register const PixelPacket
    *restrict p;

  register IndexPacket
    *restrict indexes;

  register PixelPacket
    *restrict q;

  register ssize_t
    u,
    v;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return((const PixelPacket *) NULL);
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  /* fix: the address-of operator here had been corrupted to a mojibake
     "(R)ion" token; restore `&region` so the nexus receives the request */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((const PixelPacket *) NULL);
  /*
    offset/length describe the nexus region as a linear span over the cache;
    when the span fits and (x,y,columns,rows) is fully inside the cache
    extents the pixels can be read directly.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(pixels);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const PixelPacket *) NULL);
        if ((cache_info->storage_class == PseudoClass) ||
            (cache_info->colorspace == CMYKColorspace))
          {
            status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const PixelPacket *) NULL);
          }
        return(pixels);
      }
  /*
    Pixel request is outside cache extents: synthesize out-of-bounds pixels
    per the requested virtual pixel method, one scanline run at a time.
  */
  q=pixels;
  indexes=nexus_info->indexes;
  virtual_nexus=AcquirePixelCacheNexus(1);
  if (virtual_nexus == (NexusInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return((const PixelPacket *) NULL);
    }
  /*
    Precompute the constant fill pixel used by the constant-color methods.
  */
  switch (virtual_pixel_method)
  {
    case BlackVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case GrayVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange/2);
      SetPixelGreen(&virtual_pixel,QuantumRange/2);
      SetPixelBlue(&virtual_pixel,QuantumRange/2);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,TransparentOpacity);
      break;
    }
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange);
      SetPixelGreen(&virtual_pixel,QuantumRange);
      SetPixelBlue(&virtual_pixel,QuantumRange);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    default:
    {
      virtual_pixel=image->background_color;
      break;
    }
  }
  virtual_index=0;
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /* length = size of the in-bounds run starting at (x_offset,y_offset) */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case BackgroundVirtualPixelMethod:
            case ConstantVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=(&virtual_pixel);
              virtual_indexes=(&virtual_index);
              break;
            }
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* odd tile quotient => reflect the coordinate */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
  }
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Default handler: delegate to GetVirtualPixelsFromNexus() on this
    thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels from the last virtual/queued pixel request, preferring
    a registered handler over the default per-thread nexus lookup.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
      (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region using the image's current virtual
    pixel method, preferring a registered handler over the default
    per-thread nexus path.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t   V i r t u a l   P i x e l s C a c h e                             %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  /*
    Return the pixels bound to the given cache nexus, or NULL when the
    cache has no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const PixelPacket *) nexus_info->pixels);
  return((PixelPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void MagickPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    norm;

  /*
    Composite pixel p over pixel q, weighted by the opacities alpha and
    beta.  A fully transparent alpha short-circuits to q unchanged.
  */
  if (alpha == TransparentOpacity)
    {
      *composite=(*q);
      return;
    }
  norm=PerceptibleReciprocal(1.0-QuantumScale*QuantumScale*alpha*beta);
  composite->red=norm*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=norm*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=norm*MagickOver_(p->blue,alpha,q->blue,beta);
  /* The index (black) channel participates only when both pixels are CMYK. */
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=norm*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Blend the pixels of nexus_info against the authentic cache pixels,
    weighted by the image mask.  Returns MagickTrue when every pixel of the
    region was processed, MagickFalse otherwise.
  */
  CacheInfo
    *restrict cache_info;
  MagickPixelPacket
    alpha,
    beta;
  MagickSizeType
    number_pixels;
  NexusInfo
    **restrict clip_nexus,
    **restrict image_nexus;
  register const PixelPacket
    *restrict r;
  register IndexPacket
    *restrict nexus_indexes,
    *restrict indexes;
  register PixelPacket
    *restrict p,
    *restrict q;
  register ssize_t
    i;
  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to mask unless the image has a mask and direct-class pixels. */
  if ((image->mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* Scratch nexuses: one for the authentic pixels, one for the mask. */
  image_nexus=AcquirePixelCacheNexus(1);
  clip_nexus=AcquirePixelCacheNexus(1);
  if ((image_nexus == (NexusInfo **) NULL) ||
      (clip_nexus == (NexusInfo **) NULL))
    ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
  /* p: authentic cache pixels underlying the nexus region. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    image_nexus[0],exception);
  indexes=image_nexus[0]->indexes;
  /* q: the (possibly modified) pixels of the nexus being masked. */
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  /* r: mask pixels covering the same region. */
  r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],&image->exception);
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    /* Bail out if either pixel stream could not be fetched. */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    SetMagickPixelPacket(image,p,indexes+i,&alpha);
    SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
    /* Composite the nexus pixel over the authentic pixel, weighted by the
       intensity of the corresponding mask pixel. */
    MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
      alpha.opacity,&beta);
    SetPixelRed(q,ClampToQuantum(beta.red));
    SetPixelGreen(q,ClampToQuantum(beta.green));
    SetPixelBlue(q,ClampToQuantum(beta.blue));
    SetPixelOpacity(q,ClampToQuantum(beta.opacity));
    if (cache_info->active_index_channel != MagickFalse)
      SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
    p++;
    q++;
    r++;
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  /* i < number_pixels means the loop exited early on a NULL pixel pointer. */
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void AllocatePixelCachePixels(CacheInfo *cache_info)
{
  /*
    Allocate cache_info->length bytes of pixel storage: try aligned heap
    memory first and fall back to an anonymous memory map on failure
    (cache_info->mapped records which method succeeded).
  */
  cache_info->mapped=MagickFalse;
  cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
    AcquireAlignedMemory(1,(size_t) cache_info->length));
  if (cache_info->pixels != (PixelPacket *) NULL)
    return;
  cache_info->mapped=MagickTrue;
  cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
    cache_info->length);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(SIGBUS)
static void CacheSignalHandler(int status)
{
  /*
    SIGBUS handler installed by SetPixelCacheExtent(): raised when a
    memory-mapped pixel cache page cannot be materialized (e.g. the disk
    filled up); abort with a fatal exception.
  */
  (void) status;  /* signal number is not needed; silence unused warning */
  ThrowFatalException(CacheFatalError,"UnableToExtendPixelCache");
}
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;
  /*
    Open pixel cache on disk.  An empty cache filename requests a fresh
    unique temporary file; otherwise the named file is opened with access
    rights matching the requested map mode.
  */
  if (cache_info->file != -1)
    return(MagickTrue);  /* cache already open */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening the existing
           file for writing. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Read/write access: same create-then-reopen strategy as WriteMode. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  /* Account for the descriptor against the global file resource limit. */
  (void) AcquireMagickResource(FileResource,1);
  cache_info->file=file;
  cache_info->mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;
  ssize_t
    count;
  /*
    Write length bytes of buffer to the disk cache file at the given byte
    offset, restarting short or interrupted writes.  Returns the number of
    bytes written; a result shorter than length indicates a write error.
  */
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* Without pwrite(), position the shared file offset explicitly. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Each write is clamped to SSIZE_MAX bytes per POSIX. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry only when interrupted by a signal; otherwise give up. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *restrict cache_info;
  MagickOffsetType
    count,
    extent,
    offset;
  /*
    Grow the on-disk pixel cache file to at least length bytes by writing a
    single byte at offset length-1.  Returns MagickTrue on success.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];
      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that cannot be represented as a signed offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  /* Already large enough: nothing to do. */
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  extent=(MagickOffsetType) length-1;
  count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (cache_info->synchronize != MagickFalse)
    {
      int
        status;
      /* Reserve the disk blocks eagerly when synchronized I/O is enabled. */
      status=posix_fallocate(cache_info->file,offset+1,extent-offset);
      if (status != 0)
        return(MagickFalse);
    }
#endif
#if defined(SIGBUS)
  /* A SIGBUS while touching mapped cache pages indicates the extend failed. */
  (void) signal(SIGBUS,CacheSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    source_info;
  char
    format[MaxTextExtent],
    message[MaxTextExtent];
  const char
    *type;
  MagickSizeType
    length,
    number_pixels;
  MagickStatusType
    status;
  size_t
    columns,
    packet_size;
  /*
    Allocate the pixel cache.  Storage tiers are tried in order: in-memory
    (heap or anonymous map), distributed cache server (when disk resources
    are exhausted or a distributed cache was requested), then a disk file
    which is memory-mapped when the map resource limit permits.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Keep a copy of the current cache so its pixels can be cloned into the
     newly opened cache and later relinquished. */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) GetImageIndexInList(image));
  cache_info->mode=mode;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  if (image->ping != MagickFalse)
    {
      /*
        Ping mode: record geometry only, allocate no pixel storage.
      */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      cache_info->pixels=(PixelPacket *) NULL;
      cache_info->indexes=(IndexPacket *) NULL;
      cache_info->length=0;
      return(MagickTrue);
    }
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /* Round-trip division detects overflow in the length computation. */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if (cache_info->columns != columns)
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  status=AcquireMagickResource(AreaResource,cache_info->length);
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) ||
          (cache_info->type == MemoryCache))
        {
          AllocatePixelCachePixels(cache_info);
          if (cache_info->pixels == (PixelPacket *) NULL)
            cache_info->pixels=source_info.pixels;
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              cache_info->indexes=(IndexPacket *) NULL;
              /* Indexes live immediately after the pixels in one block. */
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status&=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              return(MagickTrue);
            }
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
    }
  /*
    Create pixel cache on disk.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if ((status == MagickFalse) || (cache_info->type == DistributedCache))
    {
      DistributeCacheInfo
        *server_info;
      /*
        Disk resources exhausted, or a distributed cache was explicitly
        requested: try a distributed pixel cache server.
      */
      if (cache_info->type == DistributedCache)
        RelinquishMagickResource(DiskResource,cache_info->length);
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->cache_filename,GetDistributeCacheFile(
                    (DistributeCacheInfo *) cache_info->server_info),type,
                    (double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(MagickTrue);
            }
        }
      RelinquishMagickResource(DiskResource,cache_info->length);
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      /* Close the old cache file so a fresh one is created below. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      RelinquishMagickResource(DiskResource,cache_info->length);
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  /* Only memory-map when the extent fits in a size_t. */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if ((status == MagickFalse) && (cache_info->type != MapCache) &&
          (cache_info->type != MemoryCache))
        cache_info->type=DiskCache;
      else
        {
          cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /* Mapping failed: fall back to plain disk I/O. */
              cache_info->pixels=source_info.pixels;
              cache_info->type=DiskCache;
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(MagickTrue);
            }
        }
      RelinquishMagickResource(MapResource,cache_info->length);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to (rather than initializes)
%      an existing persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    *restrict clone_info;
  Image
    clone_image;
  MagickBooleanType
    status;
  ssize_t
    page_size;
  /*
    Attach to (attach != MagickFalse) or create a persistent disk-based
    pixel cache named filename.  On success *offset is advanced past this
    image's cache, rounded up to the next page boundary.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /* Fast path: when this image solely owns a writable non-memory cache,
     rename the existing cache file into place instead of copying pixels.
     The double-checked test is re-evaluated under the semaphore. */
  if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
      (cache_info->reference_count == 1))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
          (cache_info->reference_count == 1))
        {
          int
            status;
          /*
            Usurp existing persistent pixel cache.
          */
          status=rename_utf8(cache_info->cache_filename,filename);
          if (status == 0)
            {
              (void) CopyMagickString(cache_info->cache_filename,filename,
                MaxTextExtent);
              *offset+=cache_info->length+page_size-(cache_info->length %
                page_size);
              UnlockSemaphoreInfo(cache_info->semaphore);
              cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),
                  "Usurp resident persistent cache");
              return(MagickTrue);
            }
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_image=(*image);
  clone_info=(CacheInfo *) clone_image.cache;
  image->cache=ClonePixelCache(cache_info);
  cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
  (void) CopyMagickString(cache_info->cache_filename,filename,MaxTextExtent);
  cache_info->type=DiskCache;
  cache_info->offset=(*offset);
  cache_info=(CacheInfo *) image->cache;
  status=OpenPixelCache(image,IOMode,exception);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(cache_info,clone_info,&image->exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *pixels;

  /*
    Thin alias: forward the request unchanged to
    QueueAuthenticPixelCacheNexus().
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception);
  return(pixels);
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    number_pixels;
  PixelPacket
    *restrict pixels;
  RectangleInfo
    region;
  /*
    Queue a mutable pixel region in the given cache nexus.  The region is
    validated against the cache geometry; NULL is returned when the region
    is out of bounds or the cache is unavailable.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* Ensure the far corner of the region also lies within the image. */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  /* FIX: "&region" was corrupted to the mojibake "(R)ion" (an HTML &reg
     entity rendered as a registered-trademark sign); restore the
     address-of expression so the nexus receives the region rectangle. */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  NexusInfo
    *restrict nexus;

  /*
    Queue a mutable region using this thread's dedicated cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[id];
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  QueueAuthenticPixelsHandler
    handler;

  /*
    Queue a mutable pixel region: delegate to an installed handler when one
    is registered, otherwise use this thread's cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  handler=cache_info->methods.queue_authentic_pixels_handler;
  if (handler != (QueueAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;
  ssize_t
    count;
  /*
    Read length bytes from the disk cache file at the given byte offset
    into buffer, restarting short or interrupted reads.  Returns the number
    of bytes read; a result shorter than length indicates an error or
    end-of-file.
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  /* Without pread(), position the shared file offset explicitly. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Each read is clamped to SSIZE_MAX bytes per POSIX. */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry only when interrupted by a signal; otherwise give up. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheIndexes(CacheInfo *restrict cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register IndexPacket
    *restrict q;
  register ssize_t
    y;
  size_t
    rows;
  /*
    Copy colormap indexes for the nexus region from the backing store
    (memory/map, disk, or distributed cache) into the nexus index buffer.
    Returns MagickFalse when there is no index channel or a transfer fails.
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *restrict p;
      /*
        Read indexes from memory; collapse to one memcpy when the region
        spans full cache rows and the extent fits in a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk; indexes are stored after the PixelPacket
        plane, hence the extent*sizeof(PixelPacket) base offset.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if ((MagickSizeType) count < length)
          break;  /* short read: fall through to the error report below */
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read indexes from distributed cache, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" was corrupted to the mojibake "(R)ion" */
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A transfer loop terminated early: report the failure.
      */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(CacheInfo *restrict cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register PixelPacket
    *restrict q;
  register ssize_t
    y;
  size_t
    rows;
  /*
    Copy pixels for the nexus region from the backing store (memory/map,
    disk, or distributed cache) into the nexus pixel buffer.  Returns
    MagickFalse when any row transfer fails.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *restrict p;
      /*
        Read pixels from memory; collapse to one memcpy when the region
        spans full cache rows and the extent fits in a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if ((MagickSizeType) count < length)
          break;  /* short read: fall through to the error report below */
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read pixels from distributed cache, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" was corrupted to the mojibake "(R)ion" */
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A transfer loop terminated early: report the failure.
      */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *restrict cache_info;
  /*
    Increment the cache reference count under its semaphore and return the
    cache.  Fixed: the assert compared the Cache handle (an opaque pointer)
    against (Cache *) NULL — a pointer-to-handle type — instead of
    (Cache) NULL as every sibling function does.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *restrict cache_info;
  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;
  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;
  /*
    Set cache pixel methods: each handler is copied from cache_methods into
    the cache only when the caller supplied a non-NULL handler, so existing
    handlers are never clobbered with NULL.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    Fixed: the handler must be read from cache_methods (the caller's value),
    not from cache_info->methods (the current value), mirroring the
    authentic-pixel case below; the old code tested the wrong struct and
    could overwrite a live handler with NULL.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% PixelPacket SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,
% const MagickBooleanType buffered,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Allocate nexus staging pixels of nexus_info->length bytes: try aligned
    heap memory first, then fall back to an anonymous memory map; fail when
    the length cannot be represented as a size_t or both allocations fail.
  */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  nexus_info->mapped=MagickFalse;
  nexus_info->cache=(PixelPacket *) MagickAssumeAligned(AcquireAlignedMemory(1,
    (size_t) nexus_info->length));
  if (nexus_info->cache != (PixelPacket *) NULL)
    return(MagickTrue);
  nexus_info->mapped=MagickTrue;
  nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
    nexus_info->length);
  if (nexus_info->cache != (PixelPacket *) NULL)
    return(MagickTrue);
  (void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
    "MemoryAllocationFailed","`%s'",cache_info->filename);
  return(MagickFalse);
}
static inline MagickBooleanType IsAuthenticPixelCache(
  const CacheInfo *restrict cache_info,const NexusInfo *restrict nexus_info)
{
  MagickOffsetType
    offset;
  /*
    Return MagickTrue when the nexus points directly at the in-core cache
    pixels for its region (i.e. it is not a buffered staging area).
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  if (nexus_info->pixels == (cache_info->pixels+offset))
    return(MagickTrue);
  return(MagickFalse);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Hint the CPU to prefetch the nexus pixels: for-write on any mode other
    than ReadMode, for-read otherwise.  The magick_unreferenced() calls keep
    builds quiet when MagickCachePrefetch expands to nothing.
  */
  magick_unreferenced(nexus_info);
  magick_unreferenced(mode);
  if (mode != ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
}
static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  /*
    Define the region of the cache for this nexus.  When the region lies
    fully inside an in-core (memory/map) cache and is row-aligned, the nexus
    aliases the cache pixels directly; otherwise a staging buffer is
    (re)allocated and used until the pixels are synced back.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  nexus_info->region=(*region);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      ssize_t
        x,
        y;
      /* x,y: inclusive lower-right corner of the requested region */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        Direct access requires the region to be in bounds and either a
        single row or a run of whole cache rows starting at column 0.
      */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
          (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
          ((nexus_info->region.width == cache_info->columns) ||
          ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;
          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  /* staging buffer holds pixels followed by indexes, when active */
  length=number_pixels*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((PixelPacket *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer is too small: release and reallocate */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((PixelPacket *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *restrict cache_info;
  CacheView
    *restrict image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Enable the image matte channel and set every pixel's opacity to the
    given value, one cache-view row at a time (parallelized with OpenMP
    when available).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;
    register ssize_t
      x;
    /* once any row fails, skip the remaining rows (can't break in OpenMP) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *restrict cache_info;
  VirtualPixelMethod
    method;
  /*
    Set the cache's virtual-pixel method and return the previous setting.
    NOTE(review): despite the const Image *image parameter, the Background
    and Transparent cases may mutate the image (alpha channel / colorspace)
    through a const cast.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* activate the alpha channel if the background is not opaque */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;
  MagickBooleanType
    status;
  /*
    Transfer pixels to the cache.  Clip and mask composites are applied
    first (DirectClass only); a nexus that aliases the cache directly needs
    no explicit write-back.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; already in place */
  assert(cache_info->signature == MagickSignature);
  /* write pixels, then indexes when the index channel is active */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Sync this thread's cache nexus back to the in-memory or disk cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;
  const int
    id = GetOpenMPThreadId();
  /*
    Flush authentic pixels to the cache: dispatch to an installed handler if
    one is registered, otherwise sync this thread's nexus directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->methods.sync_authentic_pixels_handler ==
      (SyncAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
        exception));
    }
  return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;
  /*
    Re-acquire the image pixel cache; success is a non-NULL cache.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register const IndexPacket
    *restrict p;
  register ssize_t
    y;
  size_t
    rows;
  /*
    Copy colormap indexes for the nexus region from the nexus buffer back to
    the backing store (memory/map, disk, or distributed cache).  Returns
    MagickFalse when there is no index channel or a transfer fails.
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *restrict q;
      /*
        Write indexes to memory; collapse to one memcpy when the region
        spans full cache rows and the extent fits in a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk; indexes are stored after the PixelPacket
        plane, hence the extent*sizeof(PixelPacket) base offset.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if ((MagickSizeType) count < length)
          break;  /* short write: fall through to the error report below */
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Write indexes to distributed cache, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" was corrupted to the mojibake "(R)ion" */
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A transfer loop terminated early: report the failure.
      */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register const PixelPacket
    *restrict p;
  register ssize_t
    y;
  size_t
    rows;
  /*
    Copy pixels for the nexus region from the nexus buffer back to the
    backing store (memory/map, disk, or distributed cache).  Returns
    MagickFalse when any row transfer fails.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *restrict q;
      /*
        Write pixels to memory; collapse to one memcpy when the region
        spans full cache rows and the extent fits in a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if ((MagickSizeType) count < length)
          break;  /* short write: fall through to the error report below */
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Write pixels to distributed cache, one region row per request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" was corrupted to the mojibake "(R)ion" */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A transfer loop terminated early: report the failure.
      */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
GB_unaryop__minv_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_int16
// op(A') function: GB_tran__minv_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint16_int16
(
    uint16_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Apply Cx [k] = minv ((uint16_t) Ax [k]) to all anz entries, with a
    // static schedule since every iteration does the same amount of work.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // aij = Ax [k]
        GB_GETA (aij, Ax, k) ;
        // typecast aij to the type of C
        GB_CASTING (x, aij) ;
        // Cx [k] = op (x)
        GB_OP (GB_CX (k), x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast each entry, and apply the
// unary operator.  The loop structure lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), so the caller falls back to the generic implementation.
GrB_Info GB_tran__minv_uint16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2: row counts were computed earlier; fill C here
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ast-dump-openmp-taskloop-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // taskloop simd with a single associated loop; 'x' becomes implicit firstprivate (only trailing comments added: new lines would shift the line:N refs in the CHECK block below)
#pragma omp taskloop simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // two perfectly nested loops, no collapse: only the outer loop is associated with the directive
#pragma omp taskloop simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++) // inner loop is the structured block (body) of the construct
      ;
}
void test_three(int x, int y) { // collapse(1) is explicit but equivalent to the default: same AST shape as test_two plus an OMPCollapseClause
#pragma omp taskloop simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops are associated; the null statement becomes the structured block
#pragma omp taskloop simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // three nested loops with collapse(2): outer two are associated, the innermost for remains the structured block
#pragma omp taskloop simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskloop-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:4:9, col:26>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:10:9, col:26>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:17:9, col:38>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTaskLoopSimdDirective {{.*}} <line:24:9, col:38>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:36> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTaskLoopSimdDirective {{.*}} <line:31:9, col:38>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:27, col:37>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:36> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:36> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .lb. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .ub. 'const unsigned long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .st. 'const long'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .liter. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .reductions. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
GB_emult_template.c | //------------------------------------------------------------------------------
// GB_emult_template: phase1 and phase2 for C=A.*B, C<M>=A.*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Computes C=A.*B (no mask) or C<M>=A.*B (mask present and not complemented).
// Does not handle the case C<!M>=A.*B. The complemented mask is handled in
// GB_mask instead. If present, the mask M is assumed to be very sparse
// compared with A and B.
// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C. Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd in GB_task_cumsum.
// phase2: computes C, using the counts computed by phase1.
{
// iB_first is unused if the operator is FIRST
#include "GB_unused.h"
//--------------------------------------------------------------------------
// get A, B, M, and C
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const int64_t vlen = A->vlen ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int64_t *restrict Bi = B->i ;
const int64_t *restrict Mp = NULL ;
const int64_t *restrict Mh = NULL ;
const int64_t *restrict Mi = NULL ;
const GB_void *restrict Mx = NULL ;
GB_cast_function cast_M = NULL ;
size_t msize = 0 ;
if (M != NULL)
{
Mp = M->p ;
Mh = M->h ;
Mi = M->i ;
Mx = M->x ;
cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
msize = M->type->size ;
}
#if defined ( GB_PHASE_2_OF_2 )
const GB_ATYPE *restrict Ax = A->x ;
const GB_ATYPE *restrict Bx = B->x ;
const int64_t *restrict Cp = C->p ;
const int64_t *restrict Ch = C->h ;
int64_t *restrict Ci = C->i ;
GB_CTYPE *restrict Cx = C->x ;
#endif
//--------------------------------------------------------------------------
// phase1: count entries in each C(:,j); phase2: compute C
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (int taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
bool fine_task = (klast == -1) ;
int64_t len ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
len = TaskList [taskid].len ;
}
else
{
// a coarse task operates on one or more whole vectors
len = vlen ;
}
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of C
//------------------------------------------------------------------
int64_t j = (Ch == NULL) ? k : Ch [k] ;
#if defined ( GB_PHASE_1_OF_2 )
int64_t cjnz = 0 ;
#else
int64_t pC, pC_end ;
if (fine_task)
{
// A fine task computes a slice of C(:,j)
pC = TaskList [taskid ].pC ;
pC_end = TaskList [taskid+1].pC ;
ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
}
else
{
// The vectors of C are never sliced for a coarse task.
pC = Cp [k] ;
pC_end = Cp [k+1] ;
}
int64_t cjnz = pC_end - pC ;
if (cjnz == 0) continue ;
#endif
//------------------------------------------------------------------
// get A(:,j)
//------------------------------------------------------------------
int64_t pA = -1, pA_end = -1 ;
if (fine_task)
{
// A fine task operates on Ai,Ax [pA...pA_end-1], which is
// a subset of the vector A(:,j)
pA = TaskList [taskid].pA ;
pA_end = TaskList [taskid].pA_end ;
}
else
{
// A coarse task operates on the entire vector A (:,j)
int64_t kA = (Ch == Ah) ? k :
((C_to_A == NULL) ? j : C_to_A [k]) ;
if (kA >= 0)
{
pA = Ap [kA] ;
pA_end = Ap [kA+1] ;
}
}
int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice
bool adense = (ajnz == len) ;
// get the first and last indices in A(:,j) for this vector
int64_t iA_first = -1 ;
if (ajnz > 0)
{
iA_first = Ai [pA] ;
}
#if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
int64_t iA_last = -1 ;
if (ajnz > 0)
{
iA_last = Ai [pA_end-1] ;
}
#endif
//------------------------------------------------------------------
// get B(:,j)
//------------------------------------------------------------------
int64_t pB = -1, pB_end = -1 ;
if (fine_task)
{
// A fine task operates on Bi,Bx [pB...pB_end-1], which is
// a subset of the vector B(:,j)
pB = TaskList [taskid].pB ;
pB_end = TaskList [taskid].pB_end ;
}
else
{
// A coarse task operates on the entire vector B (:,j)
int64_t kB = (Ch == Bh) ? k :
((C_to_B == NULL) ? j : C_to_B [k]) ;
if (kB >= 0)
{
pB = Bp [kB] ;
pB_end = Bp [kB+1] ;
}
}
int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice
bool bdense = (bjnz == len) ;
// get the first and last indices in B(:,j) for this vector
int64_t iB_first = -1 ;
if (bjnz > 0)
{
iB_first = Bi [pB] ;
}
#if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
int64_t iB_last = -1 ;
if (bjnz > 0)
{
iB_last = Bi [pB_end-1] ;
}
#endif
//------------------------------------------------------------------
// phase1: count nnz (C (:,j)); phase2: compute C(:,j)
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (ajnz == 0 || bjnz == 0)
{
//--------------------------------------------------------------
// A(:,j) and/or B(:,j) are empty
//--------------------------------------------------------------
;
}
else if (iA_last < iB_first || iB_last < iA_first)
{
//--------------------------------------------------------------
// intersection of A(:,j) and B(:,j) is empty
//--------------------------------------------------------------
// the last entry of A(:,j) comes before the first entry
// of B(:,j), or visa versa
;
}
else
#endif
if (M == NULL)
{
if (adense && bdense)
{
//----------------------------------------------------------
// A(:,j) and B(:,j) dense: thus C(:,j) dense
//----------------------------------------------------------
ASSERT (ajnz == bjnz) ;
ASSERT (iA_first == iB_first) ;
ASSERT (iA_last == iB_last ) ;
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
Ci [pC + p] = p + iA_first ;
GB_GETA (aij, Ax, pA + p) ;
GB_GETB (bij, Bx, pB + p) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (adense)
{
//----------------------------------------------------------
// A(:,j) is dense, B(:,j) is sparse: thus C(:,j) sparse
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = bjnz ;
#else
ASSERT (cjnz == bjnz) ;
for (int64_t p = 0 ; p < bjnz ; p++)
{
int64_t i = Bi [pB + p] ;
Ci [pC + p] = i ;
GB_GETA (aij, Ax, pA + i - iA_first) ;
GB_GETB (bij, Bx, pB + p) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (bdense)
{
//----------------------------------------------------------
// A(:,j) is sparse, B(:,j) is dense: thus C(:,j) sparse
//----------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
cjnz = ajnz ;
#else
ASSERT (cjnz == ajnz) ;
for (int64_t p = 0 ; p < ajnz ; p++)
{
int64_t i = Ai [pA + p] ;
Ci [pC + p] = i ;
GB_GETA (aij, Ax, pA + p) ;
GB_GETB (bij, Bx, pB + i - iB_first) ;
GB_BINOP (GB_CX (pC + p), aij, bij) ;
}
#endif
}
else if (ajnz > 32 * bjnz)
{
//----------------------------------------------------------
// A(:,j) is much denser than B(:,j)
//----------------------------------------------------------
for ( ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
// find i in A(:,j)
int64_t pright = pA_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
if (found)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else if (bjnz > 32 * ajnz)
{
//----------------------------------------------------------
// B(:,j) is much denser than A(:,j)
//----------------------------------------------------------
for ( ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
// find i in B(:,j)
int64_t pright = pB_end - 1 ;
bool found ;
GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
if (found)
{
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
else
{
//----------------------------------------------------------
// A(:,j) and B(:,j) have about the same # of entries
//----------------------------------------------------------
// linear-time scan of A(:,j) and B(:,j)
while (pA < pA_end && pB < pB_end)
{
int64_t iA = Ai [pA] ;
int64_t iB = Bi [pB] ;
if (iA < iB)
{
// A(i,j) exists but not B(i,j)
pA++ ;
}
else if (iB < iA)
{
// B(i,j) exists but not A(i,j)
pB++ ;
}
else
{
// both A(i,j) and B(i,j) exist
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
ASSERT (pC < pC_end) ;
Ci [pC] = iB ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
pA++ ;
pB++ ;
}
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
}
else
{
//--------------------------------------------------------------
// Mask is present
//--------------------------------------------------------------
int64_t pM = -1 ;
int64_t pM_end = -1 ;
if (fine_task)
{
// A fine task operates on Mi,Mx [pM...pM_end-1], which is
// a subset of the vector M(:,j)
pM = TaskList [taskid].pM ;
pM_end = TaskList [taskid].pM_end ;
}
else
{
int64_t kM = -1 ;
if (Ch == Mh)
{
// Ch is the same as Mh (a shallow copy), or both NULL
kM = k ;
}
else
{
kM = (C_to_M == NULL) ? j : C_to_M [k] ;
}
if (kM >= 0)
{
pM = Mp [kM] ;
pM_end = Mp [kM+1] ;
}
}
//--------------------------------------------------------------
// C(:,j)<M(:,j) = A(:,j) .* B (:,j)
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// get M(i,j) for A(i,j) .* B (i,j)
//----------------------------------------------------------
int64_t i = Mi [pM] ;
bool mij ;
cast_M (&mij, Mx +(pM*msize), 0) ;
if (!mij) continue ;
//----------------------------------------------------------
// get A(i,j)
//----------------------------------------------------------
int64_t apright = pA_end - 1 ;
bool afound ;
GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
if (!afound) continue ;
//----------------------------------------------------------
// get B(i,j)
//----------------------------------------------------------
int64_t bpright = pB_end - 1 ;
bool bfound ;
GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
if (!bfound) continue ;
//----------------------------------------------------------
// C(i,j) = A(i,j) .* B(i,j)
//----------------------------------------------------------
// C (i,j) = A (i,j) .* B (i,j)
#if defined ( GB_PHASE_1_OF_2 )
cjnz++ ;
#else
Ci [pC] = i ;
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (pC), aij, bij) ;
pC++ ;
#endif
}
#if defined ( GB_PHASE_2_OF_2 )
ASSERT (pC == pC_end) ;
#endif
}
//------------------------------------------------------------------
// final count of nnz (C (:,j))
//------------------------------------------------------------------
#if defined ( GB_PHASE_1_OF_2 )
if (fine_task)
{
TaskList [taskid].pC = cjnz ;
}
else
{
Cp [k] = cjnz ;
}
#endif
}
}
}
|
GB_unaryop__abs_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp32_int64
// op(A') function: GB_tran__abs_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = fabsf (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = fabsf ((float) Ax [p]) for p = 0..anz-1, applied with nthreads
// OpenMP threads and a static schedule.  Cx and Ax may be aliased because
// each entry is read once and written once at the same index p.
GrB_Info GB_unop__abs_fp32_int64
(
float *Cx, // Cx and Ax may be aliased
int64_t *Ax, // input array of size anz (not modified)
int64_t anz, // number of entries in Ax and Cx
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time (see
// GB_control.h); the caller falls back to the generic worker instead
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// expands (via GB_GETA / GB_CASTING / GB_OP above) to:
// Cx [p] = fabsf ((float) Ax [p])
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int64_t to float, and apply
// fabsf.  The work is done by the included template GB_unaryop_transpose.c,
// driven by the GB_* macros defined above in this file.  Rowcounts, Iter and
// A_slice carry the slicing/iteration state consumed by that template;
// naslice is the number of slices of A handled in parallel.
GrB_Info GB_tran__abs_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// disabled at compile time; caller uses the generic transpose instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cancel.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef _OPENMP
/* Fallback shims: <omp.h> is only included when _OPENMP is defined (see the
 * guarded include above), but the original code called these runtime
 * functions unconditionally, so a non-OpenMP build failed with implicit
 * declarations.  A serial build behaves as a one-thread "team". */
static int omp_get_num_threads(void) { return 1; }
static int omp_get_thread_num(void) { return 0; }
#endif

/*
 * Run the countdown loop iend..ist inside an OpenMP parallel region,
 * printing which thread executes each iteration, then request cancellation
 * of the region.  NOTE(review): "omp cancel parallel" only takes effect when
 * cancellation is activated (OMP_CANCELLATION=true in the environment);
 * otherwise it is a no-op -- presumably this file is a cancellation demo.
 */
void foo(int iend, int ist)
{
    int i;
    #pragma omp parallel
    {
        /* exactly one thread reports the team size */
        #pragma omp single
        printf ("Using %d threads.\n",omp_get_num_threads());

        /* nowait: threads may reach the cancel point without synchronizing */
        #pragma omp for nowait schedule(static)
        for (i=iend;i>=ist;i--)
        {
            printf("Iteration %d is carried out by thread %d\n",i, omp_get_thread_num());
        }
        #pragma omp cancel parallel
    }
}
|
RandOpt.c | /* kcollins - RandomAccess core_single_cpu kernel from HPCC */
/* with C driver for standalone testing */
/*
* This code has been contributed by the DARPA HPCS program. Contact
* David Koester <dkoester@mitre.org> or Bob Lucas <rflucas@isi.edu>
* if you have questions.
*
* GUPS (Giga UPdates per Second) is a measurement that profiles the memory
* architecture of a system and is a measure of performance similar to MFLOPS.
* The HPCS HPCchallenge RandomAccess benchmark is intended to exercise the
* GUPS capability of a system, much like the LINPACK benchmark is intended to
* exercise the MFLOPS capability of a computer. In each case, we would
* expect these benchmarks to achieve close to the "peak" capability of the
* memory system. The extent of the similarities between RandomAccess and
* LINPACK are limited to both benchmarks attempting to calculate a peak system
* capability.
*
* GUPS is calculated by identifying the number of memory locations that can be
* randomly updated in one second, divided by 1 billion (1e9). The term "randomly"
* means that there is little relationship between one address to be updated and
* the next, except that they occur in the space of one half the total system
* memory. An update is a read-modify-write operation on a table of 64-bit words.
* An address is generated, the value at that address read from memory, modified
* by an integer operation (add, and, or, xor) with a literal value, and that
* new value is written back to memory.
*
* We are interested in knowing the GUPS performance of both entire systems and
* system subcomponents --- e.g., the GUPS rating of a distributed memory
* multiprocessor the GUPS rating of an SMP node, and the GUPS rating of a
* single processor. While there is typically a scaling of FLOPS with processor
* count, a similar phenomenon may not always occur for GUPS.
*
* For additional information on the GUPS metric, the HPCchallenge RandomAccess
* Benchmark,and the rules to run RandomAccess or modify it to optimize
* performance -- see http://icl.cs.utk.edu/hpcc/
*
*/
/*
* This file contains the computational core of the single cpu version
* of GUPS. The inner loop should easily be vectorized by compilers
* with such support.
*
* This core is used by both the single_cpu and star_single_cpu tests.
*/
/* Number of updates to table (suggested: 4x number of table entries) */
#include <sys/types.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <omp.h>
#include <string.h>
#include "hif.h"
#define POLY 0x0000000000000007UL
#define PERIOD 1317624576693539401L
#define NUPDATE (4 * TableSize)
#define NUM_THREADS 32
uint64_t HPCC_starts(int64_t);
/*
 * Perform NUPDATE random read-modify-write updates of Table, spread across
 * all coprocessor units (one OpenMP host thread per unit, NUM_THREADS device
 * threads per unit).  Each device thread owns one stream of the LFSR
 * pseudo-random sequence, seeded via HPCC_starts so the streams partition
 * the whole update sequence.  Concurrent updates of Table may race, which
 * the benchmark tolerates (the verifier allows up to 1% errors).
 */
static void
RandomAccessUpdate(uint64_t TableSize, uint64_t *Table) {
  uint64_t i;
  uint64_t *ran;    /* one starting seed per device thread */
  uint32_t unitCnt = __htc_get_unit_count();
  uint64_t ranSize = unitCnt * NUM_THREADS;

  ran = (uint64_t *)malloc(ranSize*sizeof(uint64_t));
  if (! ran) {
    /* format fixed: ranSize is uint64_t, was printed with %ld */
    printf( "Failed to allocate memory for the ran array (%llu).\n",
            (unsigned long long) ranSize);
    exit(1);
  }

  /* seed one random stream per thread (loop index made int64_t: the
     original compared a plain int against the unsigned 64-bit ranSize) */
#pragma omp parallel for
  for (int64_t j = 0; j < (int64_t) ranSize; j++) {
    ran[j] = HPCC_starts ((NUPDATE/ranSize) * j);
  }
  fprintf(stderr,"ran array has been initialized\n"); fflush(stderr);

  /* widened from uint32_t: NUPDATE/unitCnt can exceed 2^32 for big tables */
  uint64_t updates_per_unit = NUPDATE/unitCnt;
  printf("will use %u units and %d threads per unit, %u total threads\n",
         unitCnt, NUM_THREADS, unitCnt*NUM_THREADS);
  printf("NUPDATE is %llu updates_per_unit is %llu\n",
         (unsigned long long) NUPDATE, (unsigned long long) updates_per_unit);

#pragma omp parallel num_threads(unitCnt)
  {
    int unit = omp_get_thread_num();
    uint64_t *unitran = ran + (unit * NUM_THREADS);
#pragma omp target device(unit)
    {
#pragma omp parallel num_threads(NUM_THREADS)
      {
        uint64_t pran = unitran[omp_get_thread_num()];
#pragma omp for schedule(static, 1) nowait
        for (i=0; i< updates_per_unit; i++) {
          /* one LFSR step, then XOR the value into a random table slot */
          pran = (pran << 1) ^ ((int64_t) pran < 0 ? POLY : 0);
          Table[pran & (TableSize-1)] ^= pran;
        }
      }
    }
  }

  free(ran);    /* was leaked in the original */
}
/*
 * Return the n-th value of the 64-bit LFSR pseudo-random sequence used by
 * RandomAccess, in O(log n) time instead of n sequential steps.  One step of
 * the recurrence is x -> (x << 1) ^ (top bit of x set ? POLY : 0); this
 * routine jumps directly to step n by repeated squaring in GF(2)[x]/POLY.
 */
uint64_t HPCC_starts(int64_t n)
{
    /* local copies of the POLY / PERIOD constants from the top of this file,
       so the routine is self-contained */
    const uint64_t poly = 0x0000000000000007UL;
    const int64_t period = 1317624576693539401L;

    while (n < 0) n += period;
    while (n > period) n -= period;
    if (n == 0) return 0x1;

    /* m2[k] is the sequence value reached after 2*k steps from 1; XOR-ing
       m2[k] over the set bits k of a state squares that state in the field */
    uint64_t m2[64];
    uint64_t t = 0x1;
    for (int k = 0; k < 64; k++) {
        m2[k] = t;
        t = (t << 1) ^ ((int64_t) t < 0 ? poly : 0);
        t = (t << 1) ^ ((int64_t) t < 0 ? poly : 0);
    }

    /* position of the highest set bit of n (n > 0 here, n < 2^63) */
    int bit = 62;
    while (bit >= 0 && !((n >> bit) & 1))
        bit--;

    /* square-and-multiply: square once per remaining bit of n, and take one
       extra sequence step whenever that bit is set */
    uint64_t r = 0x2;
    while (bit > 0) {
        uint64_t sq = 0;
        for (int k = 0; k < 64; k++)
            if ((r >> k) & 1)
                sq ^= m2[k];
        r = sq;
        bit--;
        if ((n >> bit) & 1)
            r = (r << 1) ^ ((int64_t) r < 0 ? poly : 0);
    }
    return r;
}
/*kcollins timers*/
#include <sys/time.h>
#include <sys/resource.h>
double RTSEC() {
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
/* User CPU time consumed by this process, in seconds, via getrusage. */
double CPUSEC() {
    struct rusage usage;
    getrusage(RUSAGE_SELF, &usage);
    return (double) usage.ru_utime.tv_sec + 1.e-6 * (double) usage.ru_utime.tv_usec;
}
/*
 * Driver: allocate the update table (2^power entries, default 2^24), run the
 * timed RandomAccessUpdate kernel on the coprocessor copy of the table, then
 * verify by replaying the update stream serially and counting mismatches
 * (up to 1% of locations may differ because of benign update races).
 */
int
main (int argc, char **argv) {
  uint64_t i;
  uint64_t temp;
  double cputime;    /* CPU time to update table */
  double realtime;   /* Real time to update table */
  double GUPs;
  uint64_t *Table;
  uint64_t *cp_Table;
  uint64_t TableSize;

  pers_attach();

  int power = 24;
  if (argc > 1) {
    power = atoi(argv[1]);
  }
  /* guard the shift below: the original computed 1<<power in plain int
     arithmetic, which is undefined behavior for power >= 31 */
  if (power < 1 || power > 62) {
    printf("Invalid table-size exponent %d (expected 1..62).\n", power);
    return 1;
  }
  TableSize = (uint64_t)1 << power;

  Table = (uint64_t *)calloc( TableSize, sizeof(uint64_t) );
  if (! Table) {
    printf( "Failed to allocate memory for the update table (%ld).\n", TableSize);
    return 1;
  }
  cp_Table = ((uint64_t *)(pers_cp_malloc(TableSize*sizeof(uint64_t ))));
  if (!cp_Table) {
    printf("Failed to allocate memory for the cp update table (%ld).\n",TableSize);
    return 1;
  }

  /* Print parameters for run */
  printf( "Main table size = %ld words\n", TableSize);
  printf( "Number of updates = %ld\n", NUPDATE);

  /* Initialize main table to the identity pattern expected by the verifier */
  for (i=0; i<TableSize; i++) Table[i] = i;
  pers_cp_memcpy(cp_Table, Table, TableSize*sizeof(uint64_t ));

  /* Begin timing here */
  cputime = -CPUSEC();
  realtime = -RTSEC();
  RandomAccessUpdate(TableSize,cp_Table);
  /* End timed section */
  cputime += CPUSEC();
  realtime += RTSEC();
  pers_cp_memcpy(Table, cp_Table, TableSize*sizeof(uint64_t ));

  /* make sure no division by zero */
  GUPs = (realtime > 0.0 ? 1.0 / realtime : -1.0);
  GUPs *= 1e-9*NUPDATE;

  /* Print timing results */
  printf( "CPU time used = %.6f seconds\n", cputime);
  printf( "Real time used = %.6f seconds\n", realtime);
  printf( "%.9f Billion(10^9) Updates per second [GUP/s]\n", GUPs );

  /* Verification: replay the full update stream serially; every update lost
     to a race in the parallel run leaves a non-identity table entry */
  temp = 0x1;
  for (i=0; i<NUPDATE; i++) {
    temp = (temp << 1) ^ (((int64_t) temp < 0) ? POLY : 0);
    Table[temp & (TableSize-1)] ^= temp;
  }
  temp = 0;
  for (i=0; i<TableSize; i++)
    if (Table[i] != i)
      temp++;
  printf( "Found %ld errors in %ld locations (%s).\n",
          temp, TableSize, (temp <= 0.01*TableSize) ? "passed" : "failed");

  free( Table );
  /* cp_Table is owned by the pers_* coprocessor allocator; presumably it is
     reclaimed by pers_detach() -- TODO confirm (no pers_cp_free is visible) */
  pers_detach();
  return 0;
}
|
main.c | #define _POSIX_C_SOURCE 199309L
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include <omp.h>
#include <argp.h>
// Version string reported by argp for --version.
const char *argp_program_version =
"benchmark 0.1";
/* Program documentation. */
static char doc[] =
"Do some benchmark with openMP";
// Command-line options; each row is: long name, key char, argument name,
// flags, help text.
static struct argp_option options[] = {
{"verbose", 'v', 0, 0, "Produce verbose output" },
{"quiet", 'q', 0, 0, "Don't produce any output" },
{"iterations", 'i', "NUMBER", 0, "Number of iterations" },
{"output", 'o', "file", 0, "output to file instead of stdout" },
{"reps", 'r', "NUMBER", 0, "number of repetions in each iteration" },
{"data", 'b', "NUMBER", 0, "number of kB to iterate over (if size extends LVL1 cache, speed up can be faster then according to Amdahl's law)" },
{"operation", 'O', "name", 0, "operation e.g. mul, add. ..." },
{"numThreads", 'n', "NUMBER", 0, "specifies number of threads to be used. Default is num of logical cpus" },
{ 0 }
};
// Parsed command-line state: filled in by parse_opt(), read by main().
struct arguments
{
// boolean flags and small integer settings (0 means "use the default")
int silent, verbose, numThreads , buffer_size_kb , use_reps_iteration;
int use_output_file;
// NOTE(review): output_file is parsed (-o) but never opened in the visible
// code -- presumably not implemented yet
char *output_file;
// operation name (-O); not interpreted anywhere in the visible code
char *operation;
size_t iterations, reps_per_iteration;
};
/* parse a single option. */
static error_t
parse_opt (int key, char *arg, struct argp_state *state)
{
/* get the input argument from argp_parse, which we
know is a pointer to our arguments structure. */
struct arguments *arguments = state->input;
switch (key)
{
case 'q': case 's':
arguments->silent = 1;
break;
case 'v':
arguments->verbose = 1;
break;
case 'o':
arguments->output_file = arg;
arguments->use_output_file = 1;
break;
case 'b':
arguments->buffer_size_kb = strtol(arg,NULL,10);//TODO error handling
break;
case 'O':
arguments->operation = arg;
break;
case 'n':
arguments->numThreads = strtol(arg,NULL,10);//TODO error handling
break;
case 'i':
arguments->iterations = strtol(arg,NULL,10);//TODO error handling
break;
case 'r':
arguments->use_reps_iteration = 1;
arguments->reps_per_iteration = strtol(arg,NULL,10);//TODO error handling
break;
case ARGP_KEY_ARG:
argp_usage (state);
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
/* our argp parser. */
static struct argp argp = { options, parse_opt, 0 , doc };
//parsing args set up end---------------------------------------------------------------
// Timing state shared with main(): both clock() tick counts and
// clock_gettime() snapshots are taken around the measured loop.
clock_t ticks, new_ticks;
struct timespec t1, t2;
/*
 * result = stop - start, normalized so that 0 <= tv_nsec < 1e9.
 * Callers time monotonic intervals, so stop is expected to be >= start.
 */
void timespec_diff(struct timespec *start, struct timespec *stop,
        struct timespec *result)
{
    time_t sec = stop->tv_sec - start->tv_sec;
    long nsec = stop->tv_nsec - start->tv_nsec;
    if (nsec < 0) {
        /* borrow one second */
        sec -= 1;
        nsec += 1000000000;
    }
    result->tv_sec = sec;
    result->tv_nsec = nsec;
}
/*
 * Benchmark driver: parse options, run `iterations` timed repetitions of a
 * simple float workload (either a register-only multiply loop or a squaring
 * pass over a heap buffer), then report mean/variance/GFLOPS statistics
 * derived from both wall-clock time and CPU ticks.
 */
int main(int argc, char **argv) {
    //parsing args----------------------------------------------------------------
    struct arguments arguments;
    /* default values. */
    arguments.silent = 0;
    arguments.verbose = 0;
    arguments.reps_per_iteration = 100;
    arguments.iterations = 100;
    arguments.output_file = "-";
    arguments.operation = "mul";
    arguments.numThreads = 0;
    arguments.buffer_size_kb = 0;
    arguments.use_reps_iteration = 0;
    argp_parse (&argp, argc, argv, 0, 0, &arguments);
    //parsing args end---------------------------------------------------------------

    if (arguments.iterations == 0) {
        /* every statistic below divides by the iteration count */
        fprintf(stderr, "iterations must be > 0\n");
        return 1;
    }

    //allocate mem for measured times
    struct timespec * t_times = malloc(arguments.iterations * sizeof(struct timespec));
    uint64_t * nanos = malloc(arguments.iterations * sizeof(uint64_t));
    double * cpu_clocks = malloc(arguments.iterations * sizeof(double));
    if (t_times == NULL || nanos == NULL || cpu_clocks == NULL) {
        /* the original never checked these and would have crashed later */
        perror("malloc");
        return 1;
    }
    double * float_buffer = NULL;

    //allocate mem for buffer
    if (arguments.buffer_size_kb){
        if (arguments.verbose)
            printf("allocating %zu bytes of memory\n", sizeof(double)*128*arguments.buffer_size_kb);
        float_buffer = malloc(sizeof(double) * 128 * arguments.buffer_size_kb) ; //128 * 8byte(sizeof(double)) = 1kbyte
        if (float_buffer == NULL) {
            /* the original printed perror() and then dereferenced NULL */
            perror("malloc");
            return 1;
        }
        /* the original memset zeroed only the first kB and was immediately
           overwritten by this init loop, so it was removed */
        for (size_t i = 0 ; i < (size_t) arguments.buffer_size_kb * 128 ; i++){
            float_buffer[i] = 1.1f;
        }
        if (arguments.verbose) {
            if (arguments.use_reps_iteration) {
                printf("warn: ignoring reps per iteration and set it to 128 * buffer_size\n");
            }
            else {
                printf("setting reps per iteration to 128 * buffer_size\n");
            }
        }
        arguments.reps_per_iteration = arguments.buffer_size_kb * 128;
    }

    float f2 = (float) arguments.iterations + 1.1f; //avoid compiler optimization, because iterations is unknown for compiler
    float f1 = 1.1f;

    //manage threads (0 means: let the runtime pick automatically)
    if(arguments.numThreads != 0){
        omp_set_num_threads(arguments.numThreads);
    }
    if(arguments.verbose && !arguments.silent){
        printf("using threads: %d\n", omp_get_max_threads());
    }

    //iterate
    for (size_t j = 0; j < arguments.iterations ; j++){
        f2 = (float) arguments.iterations + 0.1 + j;
        clock_gettime(CLOCK_MONOTONIC, &t1);
        ticks = clock();
        if (arguments.buffer_size_kb == 0) {
            /* BUG FIX: f2 must be firstprivate -- with plain private(f2)
               every thread read an uninitialized copy (undefined behavior) */
            #pragma omp parallel for private(f1) firstprivate(f2)
            for (size_t i = 0; i < arguments.reps_per_iteration; i++) {
                f1 = f2 * 1.1f;
            }
        }
        else {
            #pragma omp parallel for
            for (size_t i = 0 ; i < (size_t) arguments.buffer_size_kb * 128 ; i++ ){
                float_buffer[i] = float_buffer[i] * float_buffer[i];
            }
        }
        clock_gettime(CLOCK_MONOTONIC, &t2);
        new_ticks = clock();
        struct timespec c;
        timespec_diff(&t1,&t2,&c);
        t_times[j] = c;
        nanos[j] = c.tv_sec * 1000000000 + c.tv_nsec;
        cpu_clocks[j] = (double )(new_ticks - ticks) * 1000000000 / (double) CLOCKS_PER_SEC;
    }

    //calculate mean and variance
    uint64_t mean = 0;
    double cpu_time_mean = 0.0;
    for ( size_t i = 0 ; i < arguments.iterations; i++){
        mean += nanos[i];
        cpu_time_mean += cpu_clocks[i];
        if (arguments.verbose && !arguments.silent){
            printf("real time per iteration = %ld sec %ld nsec \t cpu_time = %lf\n", t_times[i].tv_sec , t_times[i].tv_nsec, cpu_clocks[i]);
        }
    }
    if (arguments.verbose){
        printf("--------------------------------------------\n");
    }
    mean /= arguments.iterations;
    cpu_time_mean /= arguments.iterations;

    //variance of real time and cpu time
    uint64_t variance = 0;
    double cpu_time_variance = 0;
    for ( size_t i = 0 ; i < arguments.iterations; i++){
        /* (mean - nanos[i])^2 is correct even with unsigned wraparound */
        variance += (mean - nanos[i]) * (mean - nanos[i]);
        cpu_time_variance += (cpu_time_mean - cpu_clocks[i]) * (cpu_time_mean - cpu_clocks[i]);
    }
    variance /= arguments.iterations;
    cpu_time_variance /= arguments.iterations;

    double std_deviation = sqrt(variance);
    double rel_deviation = (double) std_deviation / (double) mean ;
    //gflops
    double gflop =( double ) arguments.reps_per_iteration / (double) mean ; // flops per nanosecond = Gflops
    double gflop_deviation = rel_deviation * gflop;
    double vgfkop = gflop_deviation * gflop_deviation;
    //time calculated from cpu ticks
    double cpu_time_deviation = sqrt(cpu_time_variance);
    double cpu_time_rel_deviation = cpu_time_deviation / cpu_time_mean;
    //gflops
    double cpu_ticks_gflop =( double ) arguments.reps_per_iteration / (double) cpu_time_mean ; // flops per nanosecond = Gflops
    double cpu_ticks_gflop_deviation = cpu_time_rel_deviation * cpu_ticks_gflop;
    double cpu_ticks_vgfkop = cpu_ticks_gflop_deviation * cpu_ticks_gflop_deviation;

    /* measurement buffers are no longer needed (were leaked before) */
    free(t_times);
    free(nanos);
    free(cpu_clocks);
    free(float_buffer);

    if(arguments.silent){
        return 0;
    }
    if (arguments.verbose) {
        printf("All values displayed in nanosecond and relative deviations in %%\n");
        if(arguments.buffer_size_kb)
            printf("Using buffer with %ikBytes", arguments.buffer_size_kb);
        /* formats fixed: reps_per_iteration is size_t (%zu), mean/variance
           are uint64_t (PRIu64); "%1f" was a typo for "%f" */
        printf("real time from clock_gettime for one iteration (%zu operations):\n", arguments.reps_per_iteration);
        printf("mean: %" PRIu64 "\tdeviation: %f\tvariance: %" PRIu64 "\trel deviation: %f\n", mean, std_deviation, variance, rel_deviation);
        printf("\ngflops (1000000000 operations per second):\n");
        printf("mean: %f\tdeviation: %f\tvariance: %f\trel deviation: %f\n", gflop, gflop_deviation, vgfkop,
               rel_deviation);
        printf("\ntime calculated from cpu ticks per iteration (%zu operations)(does not make sense for more then 1 thread):\n",
               arguments.reps_per_iteration);
        printf("mean: %f\tdeviation: %f\tvariance: %f\trel deviation: %f\n", cpu_time_mean, cpu_time_deviation,
               cpu_time_variance, cpu_time_rel_deviation);
        printf("\ngflops (1000000000 operations per second):\n");
        printf("mean: %f\tdeviation: %f\tvariance: %f\trel deviation: %f\n", cpu_ticks_gflop,
               cpu_ticks_gflop_deviation, cpu_ticks_vgfkop, cpu_time_rel_deviation);
    }
    else {
        printf("%f\t%f\t%f\t%f\t%i\n", gflop, gflop_deviation, vgfkop,
               rel_deviation, omp_get_max_threads());
    }
    return 0;
}
|
inner_product.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, the inner product of two vector fields is computed.
*/
#include <stdio.h>
#include "../game_types.h"
int inner_product(Vector_field in_field_0, Vector_field in_field_1, Scalar_field out_field, Grid *grid)
{
    /*
    Computes the grid-weighted inner product of the vector fields in_field_0
    and in_field_1 at every scalar point: the horizontal contributions from
    the adjacent edges (5 for pentagons, 6 for hexagons) plus the two
    vertical contributions from the layer interfaces above and below.
    Needed for the dissipation due to momentum diffusion (friction).
    Always returns 0.
    */
    #pragma omp parallel for
    for (int h_index = 0; h_index < NO_OF_SCALARS_H; ++h_index)
    {
        // pentagonal cells have one edge fewer than hexagonal cells
        int n_edges = (h_index < NO_OF_PENTAGONS) ? 5 : 6;
        for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
        {
            int cell = layer_index*NO_OF_SCALARS_H + h_index;
            // eight weights per cell: n_edges horizontal + 2 vertical
            int base = 8*cell;
            out_field[cell] = 0;
            // horizontal part: products at the adjacent edge velocities
            for (int e = 0; e < n_edges; ++e)
            {
                int vec = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + grid -> adjacent_vector_indices_h[6*h_index + e];
                out_field[cell] += grid -> inner_product_weights[base + e]*in_field_0[vec]*in_field_1[vec];
            }
            // vertical part: the interfaces bounding this layer
            int v_above = h_index + layer_index*NO_OF_VECTORS_PER_LAYER;
            int v_below = h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER;
            out_field[cell] += grid -> inner_product_weights[base + 6]*in_field_0[v_above]*in_field_1[v_above];
            out_field[cell] += grid -> inner_product_weights[base + 7]*in_field_0[v_below]*in_field_1[v_below];
        }
    }
    return 0;
}
|
activations.c | #include "activations.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Map an ACTIVATION enum value to its configuration-file name.
 * Any value not listed maps to "relu". */
char *get_activation_string(ACTIVATION a)
{
    if (a == LOGISTIC) return "logistic";
    if (a == LOGGY)    return "loggy";
    if (a == RELU)     return "relu";
    if (a == ELU)      return "elu";
    if (a == SELU)     return "selu";
    if (a == RELIE)    return "relie";
    if (a == RAMP)     return "ramp";
    if (a == LINEAR)   return "linear";
    if (a == TANH)     return "tanh";
    if (a == PLSE)     return "plse";
    if (a == LEAKY)    return "leaky";
    if (a == STAIR)    return "stair";
    if (a == HARDTAN)  return "hardtan";
    if (a == LHTAN)    return "lhtan";
    return "relu";
}
/* Map a configuration-file name to its ACTIVATION enum value.
 * Unrecognized names fall back to RELU with a warning on stderr. */
ACTIVATION get_activation(char *s)
{
    static const struct { const char *name; ACTIVATION act; } table[] = {
        {"logistic", LOGISTIC}, {"loggy", LOGGY}, {"relu", RELU},
        {"elu", ELU}, {"selu", SELU}, {"relie", RELIE},
        {"plse", PLSE}, {"hardtan", HARDTAN}, {"lhtan", LHTAN},
        {"linear", LINEAR}, {"ramp", RAMP}, {"leaky", LEAKY},
        {"tanh", TANH}, {"stair", STAIR},
    };
    for (size_t k = 0; k < sizeof table / sizeof table[0]; ++k) {
        if (strcmp(s, table[k].name) == 0) return table[k].act;
    }
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}
/* Apply activation function `a` to a single input x.
 * Dispatches to the inline *_activate helpers declared in activations.h.
 * There is no default case: an enum value outside the list returns 0. */
float activate(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate(x);
case LOGISTIC:
return logistic_activate(x);
case LOGGY:
return loggy_activate(x);
case RELU:
return relu_activate(x);
case ELU:
return elu_activate(x);
case SELU:
return selu_activate(x);
case RELIE:
return relie_activate(x);
case RAMP:
return ramp_activate(x);
case LEAKY:
return leaky_activate(x);
case TANH:
return tanh_activate(x);
case PLSE:
return plse_activate(x);
case STAIR:
return stair_activate(x);
case HARDTAN:
return hardtan_activate(x);
case LHTAN:
return lhtan_activate(x);
}
return 0;
}
/* Apply activation `a` to all n entries of x in place.
 * LEAKY and LOGISTIC get dedicated OpenMP-parallel loops; LINEAR is the
 * identity and is skipped entirely; every other activation goes through
 * the scalar activate() dispatcher serially. */
void activate_array(float *x, const int n, const ACTIVATION a)
{
    if (a == LINEAR) {
        return; /* identity: nothing to do */
    }
    if (a == LEAKY) {
        int i;
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = leaky_activate(x[i]);
        }
        return;
    }
    if (a == LOGISTIC) {
        int i;
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = logistic_activate(x[i]);
        }
        return;
    }
    for (int i = 0; i < n; ++i) {
        x[i] = activate(x[i], a);
    }
}
/* Derivative of activation function `a` evaluated at x (note: for some
 * activations the *_gradient helpers expect the activated output as input —
 * see activations.h; this function just dispatches).
 * There is no default case: an enum value outside the list returns 0. */
float gradient(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_gradient(x);
case LOGISTIC:
return logistic_gradient(x);
case LOGGY:
return loggy_gradient(x);
case RELU:
return relu_gradient(x);
case ELU:
return elu_gradient(x);
case SELU:
return selu_gradient(x);
case RELIE:
return relie_gradient(x);
case RAMP:
return ramp_gradient(x);
case LEAKY:
return leaky_gradient(x);
case TANH:
return tanh_gradient(x);
case PLSE:
return plse_gradient(x);
case STAIR:
return stair_gradient(x);
case HARDTAN:
return hardtan_gradient(x);
case LHTAN:
return lhtan_gradient(x);
}
return 0;
}
/* Backpropagate through an elementwise activation layer: multiply each
 * delta[i] by the derivative of activation `a` at x[i], in place. */
void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
{
    for (int idx = 0; idx < n; ++idx) {
        delta[idx] *= gradient(x[idx], a);
    }
}
|
omp_ex_04.c | #include <stdio.h>
#include <omp.h>
/*
MIT License
Copyright (c) 2019 NOUREDDINE DAGHBOUDJ
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
int main()
{
    /* Exercise: every thread prints its own greeting.  Run it several
       times -- the interleaving (and thus the output order) changes from
       run to run, which is the point of the demonstration. */
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        printf("Hello World! from thread %i\n", tid);
    }
    return 0;
}
|
GB_unaryop__minv_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_uint16
// op(A') function: GB_tran__minv_bool_uint16
// C type: bool
// A type: uint16_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = true for p = 0..anz-1: per the operator table above, the boolean
// MINV unary op is the constant true, so Ax is never actually read
// (GB_GETA expands to nothing).
GrB_Info GB_unop__minv_bool_uint16
(
bool *Cx, // Cx and Ax may be aliased
uint16_t *Ax, // unused: this operator ignores its input values
int64_t anz, // number of entries in Cx
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time (see
// GB_control.h); the caller falls back to the generic worker instead
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// expands (via the GB_* macros above) to: Cx [p] = true
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the boolean MINV operator
// (constant true) to every entry.  The work is done by the included template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above in this
// file.  Rowcounts, Iter and A_slice carry the slicing/iteration state
// consumed by that template; naslice is the number of parallel slices.
GrB_Info GB_tran__minv_bool_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// disabled at compile time; caller uses the generic transpose instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "cstl/utils.h"
// 2x2 max pooling with stride 2, one output row per pair of input rows.
// Channels are processed in parallel with OpenMP; within a row a NEON
// assembly kernel handles 4 output pixels per iteration and a scalar loop
// finishes the remainder.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
// step from the end of one consumed row pair to the start of the next:
// skip the unused tail of the current row plus one full input row
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
// r0/r1 walk the two input rows feeding one output row
const float* r0 = img0;
const float* r1 = img0 + w;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2; // 4-wide vector iterations
int remain = outw - (nn << 2); // leftover scalar outputs
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// AArch64 kernel: load 8 floats from each row, vertical max (fmax),
// then pairwise max (fmaxp) to reduce each 2x2 window to one value.
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"ld1 {v2.4s, v3.4s}, [%2], #32 \n"
"fmax v0.4s, v0.4s, v2.4s \n"
"fmax v1.4s, v1.4s, v3.4s \n"
"fmaxp v2.4s, v0.4s, v1.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v2.4s}, [%3], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
// ARMv7 kernel: same scheme using vmax.f32 / vpmax.f32.
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"
"vld1.f32 {d4-d7}, [%2]! \n"
"vmax.f32 q0, q0, q2 \n"
"vmax.f32 q1, q1, q3 \n"
"vpmax.f32 d4, d0, d1 \n"
"vpmax.f32 d5, d2, d3 \n"
"subs %0, #1 \n"
"vst1.f32 {d4-d5}, [%3]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: max over the 2x2 window
for (; remain>0; remain--)
{
float max0 = max(r0[0], r0[1]);
float max1 = max(r1[0], r1[1]);
*outptr = max(max0, max1);
r0 += 2;
r1 += 2;
outptr++;
}
// advance both row pointers to the next pair of input rows
r0 += tailstep;
r1 += tailstep;
}
}
}
|
ex3-parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
char* read_file(char *filename, long* filesize);
int count_number_spaces(char* string, long stringsize);
// Read "example.in", count the spaces it contains, and report the file
// size together with the space count.  Returns EXIT_FAILURE if the file
// cannot be read.
int main(){
    long filesize;
    int nspaces;
    char *filename = "example.in";
    char *file_content = read_file(filename, &filesize);
    // read_file returns NULL when the file cannot be opened or read;
    // without this check the program would dereference a null pointer.
    if (file_content == NULL){
        fprintf(stderr, "error: could not read %s\n", filename);
        return EXIT_FAILURE;
    }
    nspaces = count_number_spaces(file_content, filesize);
    printf("size %ld spaces %d\n", filesize, nspaces);
    // we need to free the space we have used
    // to store the contents of the file
    free(file_content);
    return EXIT_SUCCESS;
}
// Read the whole file `filename` into a freshly allocated, NUL-terminated
// buffer.  On success the file size in bytes (excluding the terminator) is
// stored in *fsize and the buffer is returned; the caller owns it and must
// free() it.  Returns NULL on any error (open, seek, allocation, or read).
char* read_file(char *filename, long *fsize){
    // binary mode so ftell() matches the byte count fread() will deliver
    FILE *fp = fopen(filename, "rb");
    if (fp == NULL)
        return NULL;
    // First we figure out the end of the file
    if (fseek(fp, 0, SEEK_END) != 0) { fclose(fp); return NULL; }
    *fsize = ftell(fp);
    if (*fsize < 0) { fclose(fp); return NULL; }
    // Now we go to the beginning
    fseek(fp, 0, SEEK_SET);
    // +1 for the terminating NUL so the buffer is usable as a C string
    char *file_content = malloc(*fsize + 1);
    if (file_content == NULL) { fclose(fp); return NULL; }
    if (*fsize > 0 && fread(file_content, *fsize, 1, fp) != 1) {
        free(file_content);
        fclose(fp);
        return NULL;
    }
    file_content[*fsize] = '\0';
    fclose(fp);
    return file_content;
}
// Count the number of ' ' characters in the first `stringsize` bytes of
// `string`.  The scan is parallelized with an OpenMP reduction.
int count_number_spaces(char *string, long stringsize){
    int count = 0;
    long i;
    // The original cast `(int) stringsize` truncated sizes above INT_MAX;
    // iterating with a long index handles large files correctly.  The
    // combined parallel-for replaces the separate parallel region +
    // worksharing pair with the idiomatic single construct.
    #pragma omp parallel for reduction(+:count)
    for (i = 0; i < stringsize; i++){
        if (string[i] == ' '){
            count++;
        }
    }
    return count;
}
|
pooling_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 max pooling with stride 2 on pack-4 (4 floats per pixel) data.
// Each output pixel is the elementwise max over a 3x3 window spanning the
// three input rows r0/r1/r2.  Channels run in parallel; per row, assembly
// kernels produce 4 then 2 output pixels per iteration, and an intrinsics
// loop handles the remainder.
static void pooling3x3s2_max_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
// step (in floats) from the end of one consumed row triple to the next;
// the *4 accounts for the pack-4 layout
const int tailstep = (w - 2 * outw + w) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
// three consecutive input rows feed one output row
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
for (int i = 0; i < outh; i++)
{
int j = 0;
// 4 output pixels (= 9 input pixels per row) per iteration
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"fmax v16.4s, v0.4s, v1.4s \n"
"fmax v17.4s, v2.4s, v3.4s \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"fmax v18.4s, v4.4s, v5.4s \n"
"fmax v19.4s, v6.4s, v7.4s \n"
"ld1 {v8.4s}, [%1] \n"
"fmax v20.4s, v16.4s, v2.4s \n"
"fmax v21.4s, v17.4s, v4.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"fmax v22.4s, v18.4s, v6.4s \n"
"fmax v23.4s, v19.4s, v8.4s \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"
"fmax v16.4s, v0.4s, v1.4s \n"
"fmax v17.4s, v2.4s, v3.4s \n"
"fmax v18.4s, v4.4s, v5.4s \n"
"fmax v19.4s, v6.4s, v7.4s \n"
"ld1 {v8.4s}, [%2] \n"
"fmax v24.4s, v16.4s, v2.4s \n"
"fmax v25.4s, v17.4s, v4.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmax v26.4s, v18.4s, v6.4s \n"
"fmax v27.4s, v19.4s, v8.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"
"fmax v16.4s, v0.4s, v1.4s \n"
"fmax v17.4s, v2.4s, v3.4s \n"
"fmax v18.4s, v4.4s, v5.4s \n"
"fmax v19.4s, v6.4s, v7.4s \n"
"ld1 {v8.4s}, [%3] \n"
"fmax v28.4s, v16.4s, v2.4s \n"
"fmax v29.4s, v17.4s, v4.4s \n"
"fmax v30.4s, v18.4s, v6.4s \n"
"fmax v31.4s, v19.4s, v8.4s \n"
"fmax v20.4s, v20.4s, v24.4s \n"
"fmax v21.4s, v21.4s, v25.4s \n"
"fmax v22.4s, v22.4s, v26.4s \n"
"fmax v23.4s, v23.4s, v27.4s \n"
"fmax v20.4s, v20.4s, v28.4s \n"
"fmax v21.4s, v21.4s, v29.4s \n"
"fmax v22.4s, v22.4s, v30.4s \n"
"fmax v23.4s, v23.4s, v31.4s \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"
"vmax.f32 q0, q0, q4 \n"
"vmax.f32 q1, q1, q5 \n"
"pld [%3, #512] \n"
"vldm %3!, {d16-d23} \n"
"vmax.f32 q2, q2, q6 \n"
"vmax.f32 q3, q3, q7 \n"
"vmax.f32 q0, q0, q8 \n"
"vmax.f32 q1, q1, q9 \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
"vmax.f32 q2, q2, q10 \n"
"vmax.f32 q3, q3, q11 \n"
"pld [%2, #512] \n"
"vldm %2!, {d16-d23} \n"
"vmax.f32 q4, q4, q8 \n"
"vmax.f32 q5, q5, q9 \n"
"pld [%3, #512] \n"
"vldm %3!, {d24-d31} \n"
"vmax.f32 q6, q6, q10 \n"
"vmax.f32 q7, q7, q11 \n"
"vmax.f32 q4, q4, q12 \n"
"vmax.f32 q5, q5, q13 \n"
"vld1.f32 {d24-d25}, [%1 :128] \n"
"vld1.f32 {d26-d27}, [%2 :128] \n"
"vmax.f32 q6, q6, q14 \n"
"vmax.f32 q7, q7, q15 \n"
"vld1.f32 {d28-d29}, [%3 :128] \n"
"vmax.f32 q8, q12, q13 \n"
"vmax.f32 q8, q8, q14 \n"
"vmax.f32 q12, q0, q1 \n"
"vmax.f32 q13, q2, q3 \n"
"vmax.f32 q14, q4, q5 \n"
"vmax.f32 q15, q6, q7 \n"
"vmax.f32 q12, q12, q2 \n"
"vmax.f32 q13, q13, q4 \n"
"vmax.f32 q14, q14, q6 \n"
"vmax.f32 q15, q15, q8 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// 2 output pixels per iteration
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"
"fmax v16.4s, v0.4s, v4.4s \n"
"fmax v17.4s, v1.4s, v5.4s \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%3], #64 \n"
"fmax v18.4s, v2.4s, v6.4s \n"
"fmax v19.4s, v3.4s, v7.4s \n"
"ld1 {v0.4s}, [%1] \n"
"fmax v16.4s, v16.4s, v20.4s \n"
"fmax v17.4s, v17.4s, v21.4s \n"
"ld1 {v1.4s}, [%2] \n"
"fmax v18.4s, v18.4s, v22.4s \n"
"fmax v19.4s, v19.4s, v23.4s \n"
"ld1 {v2.4s}, [%3] \n"
"fmax v3.4s, v0.4s, v1.4s \n"
"fmax v20.4s, v16.4s, v17.4s \n"
"fmax v21.4s, v18.4s, v19.4s \n"
"fmax v3.4s, v3.4s, v2.4s \n"
"fmax v20.4s, v20.4s, v18.4s \n"
"fmax v21.4s, v21.4s, v3.4s \n"
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else // __aarch64__
asm volatile(
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"
"vmax.f32 q12, q0, q4 \n"
"vmax.f32 q13, q1, q5 \n"
"pld [%3, #512] \n"
"vldm %3!, {d16-d23} \n"
"vmax.f32 q14, q2, q6 \n"
"vmax.f32 q15, q3, q7 \n"
"vld1.f32 {d0-d1}, [%1 :128] \n"
"vmax.f32 q12, q12, q8 \n"
"vmax.f32 q13, q13, q9 \n"
"vld1.f32 {d2-d3}, [%2 :128] \n"
"vmax.f32 q14, q14, q10 \n"
"vmax.f32 q15, q15, q11 \n"
"vld1.f32 {d4-d5}, [%3 :128] \n"
"vmax.f32 q3, q0, q1 \n"
"vmax.f32 q4, q12, q13 \n"
"vmax.f32 q5, q14, q15 \n"
"vmax.f32 q3, q3, q2 \n"
"vmax.f32 q4, q4, q14 \n"
"vmax.f32 q5, q5, q3 \n"
"vst1.f32 {d8-d11}, [%0 :128]! \n"
: "=r"(outptr), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr),
"1"(r0),
"2"(r1),
"3"(r2)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
// intrinsics tail: one output pixel = max over 3x3 pack-4 pixels
for (; j < outw; j++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _max0 = vmaxq_f32(vmaxq_f32(_r00, _r01), _r02);
float32x4_t _max1 = vmaxq_f32(vmaxq_f32(_r10, _r11), _r12);
float32x4_t _max2 = vmaxq_f32(vmaxq_f32(_r20, _r21), _r22);
float32x4_t _max = vmaxq_f32(vmaxq_f32(_max0, _max1), _max2);
vst1q_f32(outptr, _max);
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 4;
}
// advance all three row pointers to the next row triple
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
mandelbrot.c | /*
* mandelbrot.c: Simple Mandelbrot Set Rendering Program in C99
* (c)2019 Seiji Nishimura <seiji1976@gmail.com>
* $Id: mandelbrot.c,v 1.1.1.3 2019/04/03 00:00:00 seiji Exp seiji $
*/
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
typedef double real_t;
// parameters
#define C_R (-0.74323348754012)
#define C_I ( 0.13121889397412)
#define RADIUS (1.E-7)
#define WIDTH 1920
#define HEIGHT 1080
#define COLORMAP_CYCLE (0x01<<9)
#define OUTPUT_FNAME "output.ppm"
// constants
#define D (2.0*RADIUS/MIN(WIDTH,HEIGHT))
#define MIN_SAMPLING (0x01<<4)
#define MAX_SAMPLING (0x01<<16)
#define MAX_ITER (0x01<<16)
// macro functions
#define FRAND() ((float) rand()/(RAND_MAX+1.0f))
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
#define MALLOC(n,t) ((t *) calloc((n),sizeof(t)))
// prototypes
void init_colormap (uint8_t *);
void init_jitter (float *, float *);
void draw_image (uint8_t *, uint8_t *, uint8_t *, float *, float *, int, int);
void rough_sketch (uint8_t *, uint8_t *, int, int);
bool detect_edge (uint8_t *, uint8_t *, uint8_t *, uint8_t *, int , int );
bool same_color (uint8_t , uint8_t , uint8_t , uint8_t , uint8_t, uint8_t);
#pragma omp declare simd notinbranch
int mandelbrot (real_t , real_t );
void write_out_image(uint8_t *, char *);
//======================================================================
// Program entry: allocate working buffers, build the colormap and jitter
// tables on rank 0 (broadcasting them under MPI), render the image, and
// have rank 0 write the PPM output file.
int main(int argc, char **argv)
{
uint8_t *colormap = NULL, *sketch = NULL, *image = NULL;
float *dx = NULL, *dy = NULL;
int nprocs = 1 , myrank = 0 ; // single-process defaults for non-MPI builds
#ifdef USE_MPI
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#endif
// colormap has one extra RGB entry (index COLORMAP_CYCLE) for interior
// points; image buffers hold 3 bytes (RGB) per pixel
if ((colormap = MALLOC(3 * (COLORMAP_CYCLE + 1), uint8_t)) == NULL ||
(sketch = MALLOC(3 * WIDTH * HEIGHT , uint8_t)) == NULL ||
(image = MALLOC(3 * WIDTH * HEIGHT , uint8_t)) == NULL ||
(dx = MALLOC(MAX_SAMPLING , float )) == NULL ||
(dy = MALLOC(MAX_SAMPLING , float )) == NULL) {
perror("malloc");
return EXIT_FAILURE;
}
// the tables are built once on rank 0 so all ranks see identical data
if (myrank == 0) {
init_colormap(colormap);
init_jitter (dx, dy );
}
#ifdef USE_MPI
MPI_Bcast(colormap, 3 * (COLORMAP_CYCLE + 1) * sizeof(uint8_t), MPI_BYTE, 0, MPI_COMM_WORLD);
MPI_Bcast(dx , MAX_SAMPLING * sizeof(float ), MPI_BYTE, 0, MPI_COMM_WORLD);
MPI_Bcast(dy , MAX_SAMPLING * sizeof(float ), MPI_BYTE, 0, MPI_COMM_WORLD);
#endif
draw_image(image, sketch, colormap, dx, dy, nprocs, myrank);
if (myrank == 0) {
write_out_image(image, OUTPUT_FNAME);
}
free(dy );
free(dx );
free(image );
free(sketch );
free(colormap);
#ifdef USE_MPI
MPI_Finalize();
#endif
return EXIT_SUCCESS;
}
//----------------------------------------------------------------------
// Initialize the colormap table: one RGB triple per cycle position built
// from phase-shifted sinusoids, plus a final all-black entry (index
// COLORMAP_CYCLE) used for points that never escape.
void init_colormap(uint8_t *colormap)
{
    for (int k = 0; k < COLORMAP_CYCLE; k++) {
        uint8_t *entry = &colormap[3 * k];
        entry[0] = ((int) (127.0 * cos((2.0 * M_PI * k) / (COLORMAP_CYCLE )) + 0.5)) + 128; /* red   */
        entry[1] = ((int) (127.0 * sin((2.0 * M_PI * k) / (COLORMAP_CYCLE )) + 0.5)) + 128; /* green */
        entry[2] = ((int) (127.0 * sin((2.0 * M_PI * k) / (COLORMAP_CYCLE>>1)) + 0.5)) + 128; /* blue: doubled frequency */
    }
    // sentinel color for in-set points
    uint8_t *last = &colormap[3 * COLORMAP_CYCLE];
    last[0] = 0;
    last[1] = 0;
    last[2] = 0;
}
//----------------------------------------------------------------------
// Initialize the jittering table with pseudo-random subpixel offsets in
// [0,1).  Entry 0 is pinned to (0,0) so the first sample is unjittered.
void init_jitter(float *dx, float *dy)
{
    srand((int) time(NULL));
    dx[0] = 0.0f;
    dy[0] = 0.0f;
    for (int k = 1; k < MAX_SAMPLING; k++) {
        dx[k] = FRAND();
        dy[k] = FRAND();
    }
}
//----------------------------------------------------------------------
// Draw the anti-aliased image: render a rough (one sample per pixel)
// sketch, then, for pixels detected as color edges, refine by jittered
// Monte Carlo supersampling.  The sample count starts at MIN_SAMPLING and
// doubles until the averaged color stabilizes (same_color between two
// consecutive rounds) or MAX_SAMPLING is reached.  Under MPI each rank
// computes a strided subset of pixels; since the buffers are zeroed by
// calloc, a bitwise-OR Allreduce merges the per-rank results.
void draw_image(uint8_t *image, uint8_t *sketch, uint8_t *colormap,
float *dx, float *dy, int nprocs, int myrank)
{ // draw anti-aliased image.
rough_sketch(sketch, colormap, nprocs, myrank);
#pragma omp parallel for schedule(dynamic,1)
for (size_t l = myrank; l < WIDTH * HEIGHT; l += nprocs) {
int i = l % WIDTH, // pixel column
j = l / WIDTH; // pixel row
uint8_t r , g , b , // current averaged color
rr, gg, bb; // color from the previous round
// detect_edge also loads the sketch color of (i,j) into r/g/b, so
// non-edge pixels keep their sketch color below
if (detect_edge(sketch, &r, &g, &b, i, j)) { // Monte Carlo Integration
int n = 1, m = MIN_SAMPLING, // samples done so far / target count
sum_r = r, sum_g = g, sum_b = b; // running color sums (sketch sample is sample 0)
do {
rr = r;
gg = g;
bb = b;
#pragma omp simd reduction(+:sum_r,sum_g,sum_b)
for (int k = n; k < m; k++) {
// jittered sample position within the pixel
real_t p_r = C_R + D * (i + dx[k] - WIDTH / 2),
p_i = C_I - D * (j + dy[k] - HEIGHT / 2);
int index = mandelbrot(p_r, p_i);
sum_r += colormap[3 * index + 0];
sum_g += colormap[3 * index + 1];
sum_b += colormap[3 * index + 2];
}
// average with rounding (add m/2 before dividing)
r = (uint8_t) ((sum_r + (m>>1)) / m);
g = (uint8_t) ((sum_g + (m>>1)) / m);
b = (uint8_t) ((sum_b + (m>>1)) / m);
} while (!same_color(r, g, b, rr, gg, bb) &&
(m = (n = m) << 1) <= MAX_SAMPLING); // double the sample count
}
image[3 * l + 0] = r;
image[3 * l + 1] = g;
image[3 * l + 2] = b;
}
#ifdef USE_MPI
MPI_Allreduce(MPI_IN_PLACE, image, 3 * WIDTH * HEIGHT * sizeof(uint8_t),
MPI_BYTE, MPI_BOR, MPI_COMM_WORLD);
#endif
return;
}
//----------------------------------------------------------------------
// Draw the rough sketch: one unjittered Mandelbrot sample per pixel,
// mapped through the colormap.  Under MPI each rank computes a strided
// subset; a bitwise-OR Allreduce merges results (unwritten pixels are
// zero from calloc).
void rough_sketch(uint8_t *sketch, uint8_t *colormap, int nprocs, int myrank)
{ // draw rough sketch image.
#pragma omp parallel for schedule(dynamic,1)
for (size_t k = myrank; k < WIDTH * HEIGHT; k += nprocs) {
int i = k % WIDTH, // pixel column
j = k / WIDTH; // pixel row
// map the pixel to a point in the complex plane centered on (C_R,C_I)
real_t p_r = C_R + D * (i - WIDTH / 2),
p_i = C_I - D * (j - HEIGHT / 2);
int index = mandelbrot(p_r, p_i);
uint8_t r = colormap[3 * index + 0],
g = colormap[3 * index + 1],
b = colormap[3 * index + 2];
sketch[3 * k + 0] = r;
sketch[3 * k + 1] = g;
sketch[3 * k + 2] = b;
}
#ifdef USE_MPI
MPI_Allreduce(MPI_IN_PLACE, sketch, 3 * WIDTH * HEIGHT * sizeof(uint8_t),
MPI_BYTE, MPI_BOR, MPI_COMM_WORLD);
#endif
return;
}
//----------------------------------------------------------------------
// Detect whether pixel (x,y) lies on a color edge: returns true if any
// pixel in its (clamped) 3x3 neighborhood differs from it according to
// same_color.  As a side effect, the pixel's own color is always written
// to *r/*g/*b, even when no edge is found.
bool detect_edge(uint8_t *image, uint8_t *r, uint8_t *g, uint8_t *b, int x, int y)
{ // detect whether (x,y) is edge or not.
*r = image[3 * (x + y * WIDTH) + 0];
*g = image[3 * (x + y * WIDTH) + 1];
*b = image[3 * (x + y * WIDTH) + 2];
// scan the neighborhood, clamped to the image borders
for (int j = MAX(0, y-1); j <= MIN(HEIGHT-1, y+1); j++) {
for (int i = MAX(0, x-1); i <= MIN(WIDTH-1, x+1); i++) {
int rr = image[3 * (i + j * WIDTH) + 0],
gg = image[3 * (i + j * WIDTH) + 1],
bb = image[3 * (i + j * WIDTH) + 2];
if (!same_color(*r, *g, *b, rr, gg, bb)) {
return true;
}
}
}
return false;
}
//----------------------------------------------------------------------
// Decide whether two RGB colors are "the same".  Two implementations are
// provided: an exact comparison (disabled via #if 0) and the active one,
// a luminance-weighted distance (green weighted heaviest) compared
// against a small tolerance.
bool same_color(uint8_t r1, uint8_t g1, uint8_t b1,
uint8_t r2, uint8_t g2, uint8_t b2)
#if 0
{ // detect whether (r1,g1,b1) and (r2,g2,b2) are same or not.
return r1 == r2 &&
g1 == g2 &&
b1 == b2;
}
#else //......................................
{ // detect whether (r1,g1,b1) and (r2,g2,b2) are same or not.
// weighted Manhattan distance; the (int) cast avoids unsigned underflow
return 3 * abs((int) r1 - r2) +
6 * abs((int) g1 - g2) +
1 * abs((int) b1 - b2) < 15;
}
#endif
//----------------------------------------------------------------------
// Iterate z -> z^2 + p from z = p and return a colormap index derived from
// the escape iteration count.  The loop keeps z_r^2 and z_i^2 in z_r/z_i
// (squared in the loop condition itself) and carries 2*z_r*z_i in `work`
// from the previous iteration, so each step costs three multiplications.
int mandelbrot(real_t p_r, real_t p_i)
{
int i;
real_t z_r, z_i, work;
z_r = p_r;
z_i = p_i;
work = 2.0 * z_r * z_i; // cross term for the next iteration
for (i = 1; i < MAX_ITER && (z_r *= z_r) +
(z_i *= z_i) < 4.0; i++) { // escape test: |z|^2 < 4
z_r += p_r - z_i ; // z_r = z_r^2 - z_i^2 + p_r
z_i = p_i + work; // z_i = 2*z_r_old*z_i_old + p_i
work = 2.0 * z_r * z_i;
}
// convert #iter to index of the colormap
// (MAX_ITER is a power of two, so i &= MAX_ITER-1 is zero only when
// i == MAX_ITER, i.e. the point never escaped)
if (i &= MAX_ITER - 1) {
i &= COLORMAP_CYCLE - 1; // escaped: wrap into the color cycle
} else { // i == MAX_ITER
i = COLORMAP_CYCLE ; // interior: sentinel (black) entry
}
return i;
}
//----------------------------------------------------------------------
// Write the RGB image out as a binary PPM ("P6") file and exit the
// program on any I/O failure.
void write_out_image(uint8_t *image, char *fname)
{ // write out image as a PPM file.
    FILE *fp = NULL;
    if ((fp = fopen(fname, "wb")) == NULL) {
        perror(fname);
        exit(EXIT_FAILURE);
    }
    fprintf(fp, "P6\n%d %d\n255\n", WIDTH, HEIGHT);
    fwrite (image, sizeof(uint8_t), 3 * WIDTH * HEIGHT, fp);
    if (ferror(fp)) {
        perror(fname);
        exit(EXIT_FAILURE);
    }
    // fclose flushes buffered output; a failure here still loses data,
    // so treat it as a write error too.
    if (fclose(fp) == EOF) {
        perror(fname);
        exit(EXIT_FAILURE);
    }
    return;
}
|
Texture.h | #ifndef _TEXTURE_H_
#define _TEXTURE_H_
//#include "Util.h"
//#include "Vector3.h"
#include "FreeImage/Dist/FreeImage.h"
#include <math.h>
#include <stdio.h>
// Thin RAII wrapper around a FreeImage bitmap supporting allocation,
// load/save, and per-pixel access (nearest and bilinear) for three image
// types: FIT_BITMAP (8-bit RGBA), FIT_FLOAT (single float per pixel), and
// FIT_RGBAF (4 floats per pixel).
class Texture
{
public:
Texture()
{
}
// Allocate a new image with the same size and pixel format as `sizeAndFormtGiver`
// (the pixel data itself is not copied).
Texture( const Texture& sizeAndFormtGiver )
{
Allocate( sizeAndFormtGiver.GetWidth(), sizeAndFormtGiver.GetHeight(), sizeAndFormtGiver.m_type, FreeImage_GetBPP( sizeAndFormtGiver.m_img ) );
}
~Texture()
{
Deallocate();
}
// Allocate a w x h image, releasing any previously held one.
// Returns true on success.
bool Allocate(int w, int h, FREE_IMAGE_TYPE imgType = FIT_BITMAP, int bpp = 32)
{
Deallocate();
m_img = FreeImage_AllocateT(imgType, w, h, bpp);
if( m_img )
{
m_type = imgType;
}
return m_img != NULL;
}
// Release the underlying bitmap, if any; safe to call repeatedly.
void Deallocate()
{
if (m_img)
{
FreeImage_Unload(m_img);
m_img = NULL;
m_type = FIT_UNKNOWN;
}
}
// Load an image from `inFile` in the given FreeImage format, replacing
// any current image.  Returns false (with a message on stderr) on failure.
bool Load(FREE_IMAGE_FORMAT format, const char* inFile)
{
Deallocate();
m_img = FreeImage_Load(format, inFile);
if (m_img == NULL)
{
fprintf(stderr, "Couldnt load image %s\n", inFile);
return false;
}
m_type = FreeImage_GetImageType( m_img );
return true;
}
// Save the image to `fileName` in the given format.  Returns false if
// no image is held or FreeImage_Save fails.
bool Save(FREE_IMAGE_FORMAT format, const char* fileName) const
{
if (!m_img)
return false;
return FreeImage_Save(format, m_img, fileName) != 0;
}
// Fetch the pixel at (x,y) into `value`, whose required type depends on
// the image type: RGBQUAD* for FIT_BITMAP, float* for FIT_FLOAT, and a
// float array (bpp/32 channels) for FIT_RGBAF.  Returns false for other
// types or on failure.  No bounds checking is performed on x/y.
bool GetPixel(unsigned int x, unsigned int y, void* value) const
{
if (!m_img)
return false;
if (m_type == FIT_BITMAP)
{
BOOL result;
{
//#pragma omp critical(GetPixel)
result = FreeImage_GetPixelColor(m_img, x, y, (RGBQUAD*)value);
}
return result != 0;
}
else if (m_type == FIT_FLOAT)
{
float* const row = (float*)FreeImage_GetScanLine(m_img, y);
if (!row)
return false;
*(float*)value = row[x];
return true;
}
else if( m_type == FIT_RGBAF )
{
float* const row = (float*)FreeImage_GetScanLine( m_img, y );
if( !row )
return false;
// channel count derived from bits-per-pixel (32 bits per float channel)
unsigned bpp = FreeImage_GetBPP( m_img );
const int CHANNELS = bpp / 32;
float* writePtr = static_cast<float*>(value);
for( int xi = 0; xi < CHANNELS; xi++ )
{
writePtr[xi] = row[x*CHANNELS + xi];
}
return true;
}
return false;
}
// Bilinearly sample the image at normalized coordinates (x,y) in [0,1),
// with wrap-around addressing and OpenGL-style half-pixel centers.
// `value` has the same per-type meaning as in GetPixel (FIT_RGBAF is
// assumed to have 4 channels here).  Returns false on failure.
bool GetPixel_Bilin(float x, float y, void* value) const
{
if (!m_img)
return false;
int w = GetWidth(), h = GetHeight();
x *= float(w);
x -= 0.5f; // opengl has pixel centers at 0.5 positions
x = fmodf(x, float(w)); // wrap horizontally
if (x < 0.0f) x += float(w);
int x1, x0 = int(x);
x -= float(x0); // x is now the horizontal blend weight
x1 = (x0 + 1) % w;
y *= float(h);
y -= 0.5f; // opengl has pixel centers at 0.5 positions
y = fmodf(y, float(h)); // wrap vertically
if (y < 0.0f) y += float(h);
int y1, y0 = int(y);
y -= float(y0); // y is now the vertical blend weight
y1 = (y0 + 1) % h;
if (m_type == FIT_BITMAP)
{
RGBQUAD val00, val01, val10, val11;
if (
!GetPixel(x0, y0, &val00) ||
!GetPixel(x1, y0, &val01) ||
!GetPixel(x0, y1, &val10) ||
!GetPixel(x1, y1, &val11)
)
{
return false;
}
// blend the four neighbors per channel
RGBQUAD& result = *(RGBQUAD*)value;
result.rgbBlue = (BYTE)(((1.0f - x) * val00.rgbBlue + x * val01.rgbBlue) * (1.0f - y)
+ ((1.0f - x) * val10.rgbBlue + x * val11.rgbBlue) * y);
result.rgbGreen = (BYTE)(((1.0f - x) * val00.rgbGreen + x * val01.rgbGreen) * (1.0f - y)
+ ((1.0f - x) * val10.rgbGreen + x * val11.rgbGreen) * y);
result.rgbRed = (BYTE)(((1.0f - x) * val00.rgbRed + x * val01.rgbRed) * (1.0f - y)
+ ((1.0f - x) * val10.rgbRed + x * val11.rgbRed) * y);
result.rgbReserved = (BYTE)(((1.0f - x) * val00.rgbReserved + x * val01.rgbReserved) * (1.0f - y)
+ ((1.0f - x) * val10.rgbReserved + x * val11.rgbReserved) * y);
return true;
}
else if (m_type == FIT_FLOAT)
{
float val00, val01, val10, val11;
if (
!GetPixel(x0, y0, &val00) ||
!GetPixel(x1, y0, &val01) ||
!GetPixel(x0, y1, &val10) ||
!GetPixel(x1, y1, &val11)
)
{
return false;
}
float& result = *(float*)value;
result = (((1.0f - x) * val00 + x * val01) * (1.0f - y)
+ ((1.0f - x) * val10 + x * val11) * y);
return true;
}
else if( m_type == FIT_RGBAF )
{
const int CHANNELS = 4;
float val00[CHANNELS], val01[CHANNELS], val10[CHANNELS], val11[CHANNELS];
if(
!GetPixel( x0, y0, val00 ) ||
!GetPixel( x1, y0, val01 ) ||
!GetPixel( x0, y1, val10 ) ||
!GetPixel( x1, y1, val11 )
)
{
return false;
}
float* result = (float*)value;
for( int i = 0; i < CHANNELS; i++ )
{
result[i] = (((1.0f - x) * val00[i] + x * val01[i]) * (1.0f - y)
+ ((1.0f - x) * val10[i] + x * val11[i]) * y);
}
return true;
}
return false;
}
//bool GetPixel_Bilin(float x, float y, Vector3& result) const
//{
// if (m_type == FIT_BITMAP)
// {
// RGBQUAD value;
// if (!GetPixel_Bilin(x, y, &value))
// return false;
// result.x = value.rgbRed / 255.0f;
// result.y = value.rgbGreen / 255.0f;
// result.z = value.rgbBlue / 255.0f;
// return true;
// }
// else if (m_type == FIT_FLOAT)
// {
// float value;
// if (!GetPixel_Bilin(x, y, &value))
// return false;
// result = value;
// return true;
// }
// return false;
//}
// Store `value` into the pixel at (x,y); `value` has the same per-type
// meaning as in GetPixel (FIT_RGBAF assumed 4 channels).  Returns false
// for unsupported types or on failure.  No bounds checking on x/y.
bool SetPixel(unsigned int x, unsigned int y, void* value)
{
if (!m_img)
return false;
if (m_type == FIT_BITMAP)
{
BOOL result;
{
//#pragma omp critical(SetPixel)
result = FreeImage_SetPixelColor(m_img, x, y, (RGBQUAD*)value);
}
return result != 0;
}
else if ( m_type == FIT_FLOAT )
{
float* const row = (float*)FreeImage_GetScanLine(m_img, y);
if (!row)
return false;
row[x] = *(float*)value;
return true;
}
else if( m_type == FIT_RGBAF )
{
float* const row = (float*)FreeImage_GetScanLine( m_img, y );
if( !row )
return false;
const int CHANNELS = 4;
for( int i = 0; i < CHANNELS; i++ )
row[x * CHANNELS + i] = ((float*)value)[i];
return true;
}
return false;
}
// bool SetPixel(unsigned int x, unsigned int y, Vector3& value)
// {
// if (m_img == NULL)
// return false;
//
// if (m_type == FIT_BITMAP)
// {
// value = saturate(value);
//
// RGBQUAD pixColour;
// pixColour.rgbRed = (BYTE)(value.x * 255.0f);
// pixColour.rgbGreen = (BYTE)(value.y * 255.0f);
// pixColour.rgbBlue = (BYTE)(value.z * 255.0f);
// pixColour.rgbReserved = 255;
//
// BOOL result;
// {
////#pragma omp critical(SetPixel_Vec3)
// result = FreeImage_SetPixelColor(m_img, x, y, &pixColour);
// }
// return result != 0;
// }
// else if (m_type == FIT_FLOAT)
// {
// float* const row = (float*)FreeImage_GetScanLine(m_img, y);
//
// if (!row)
// return false;
//
// row[x] = value.x;
//
// return true;
// }
//
// return false;
// }
// Dimensions in pixels; -1 when no image is held.
int GetWidth() const { return m_img == NULL ? -1 : FreeImage_GetWidth(m_img); }
int GetHeight() const { return m_img == NULL ? -1 : FreeImage_GetHeight(m_img); }
// True when an image is currently held.
bool IsValid() const { return m_img != NULL; }
private:
FIBITMAP* m_img = NULL; // owned FreeImage bitmap, NULL when empty
FREE_IMAGE_TYPE m_type = FIT_UNKNOWN; // cached image type of m_img
};
int FindXZDisplacementSourceUV( const Texture& i_dispTex, float i_dispTexHorizScale, float i_u, float i_v, float& o_u, float& o_v );
#endif //_TEXTURE_H_
|
no_option_no_warn.c | // RUN: %clang_cc1 -verify -Wno-source-uses-openmp -o - %s
// RUN: %clang_cc1 -verify -Wno-source-uses-openmp -o - %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
int a;
#pragma omp threadprivate(a, b)
#pragma omp parallel
|
GB_unaryop__abs_fp64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_bool
// op(A') function: GB_tran__abs_fp64_bool
// C type: double
// A type: bool
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
bool
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ABS unary operator entrywise: Cx [p] = fabs ((double) Ax [p])
// for all p in [0,anz); the bool input is typecast to double before the op
// (see the GB_CASTING / GB_OP macros above).
GrB_Info GB_unop__abs_fp64_bool
(
double *restrict Cx,
const bool *restrict Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply ABS.  The actual loop
// lives in GB_unaryop_transpose.c, textually included below and driven by
// the GB_* macros defined at the top of this file.
GrB_Info GB_tran__abs_fp64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
syncbench.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#ifdef XRAY
#include <xray.h>
#endif
#include "common.h"
#include "syncbench.h"
omp_lock_t lock;
// Benchmark driver: initializes the harness and the shared lock, then
// (when built with XRAY) runs each synchronization benchmark inside an
// XRay frame capture and writes the trace report.  Each benchmark pairs
// a reference (no-construct) timing with the construct under test.
int main(int argc, char **argv) {
#ifdef XRAY
struct XRayTraceCapture* trace = XRayInit(
20, // max. call depth
16 * 1000 * 1000, // memory for report
13, // frame count
"syncbench.map");
#endif
// Start Paraver tracing
#ifdef PARAVERTRACE
Extrae_init();
#endif
init(argc, argv);
// the global lock used by the LOCK/UNLOCK benchmark
omp_init_lock(&lock);
#ifdef XRAY
/* GENERATE REFERENCE TIME */
XRayStartFrame(trace);
reference("reference time 1", &refer);
XRayEndFrame(trace);
/* TEST PARALLEL REGION */
XRayStartFrame(trace);
benchmark("PARALLEL", &testpr);
XRayEndFrame(trace);
/* TEST FOR */
XRayStartFrame(trace);
benchmark("FOR", &testfor);
XRayEndFrame(trace);
/* TEST PARALLEL FOR */
XRayStartFrame(trace);
benchmark("PARALLEL FOR", &testpfor);
XRayEndFrame(trace);
/* TEST BARRIER */
XRayStartFrame(trace);
benchmark("BARRIER", &testbar);
XRayEndFrame(trace);
/* TEST SINGLE */
XRayStartFrame(trace);
benchmark("SINGLE", &testsing);
XRayEndFrame(trace);
/* TEST CRITICAL*/
XRayStartFrame(trace);
benchmark("CRITICAL", &testcrit);
XRayEndFrame(trace);
/* TEST LOCK/UNLOCK */
XRayStartFrame(trace);
benchmark("LOCK/UNLOCK", &testlock);
XRayEndFrame(trace);
/* TEST ORDERED SECTION */
XRayStartFrame(trace);
benchmark("ORDERED", &testorder);
XRayEndFrame(trace);
/* GENERATE NEW REFERENCE TIME */
XRayStartFrame(trace);
reference("reference time 2", &referatom);
XRayEndFrame(trace);
/* TEST ATOMIC */
XRayStartFrame(trace);
benchmark("ATOMIC", &testatom);
XRayEndFrame(trace);
/* GENERATE NEW REFERENCE TIME */
XRayStartFrame(trace);
reference("reference time 3", &referred);
XRayEndFrame(trace);
/* TEST REDUCTION (1 var) */
XRayStartFrame(trace);
benchmark("REDUCTION", &testred);
XRayEndFrame(trace);
#endif
#ifdef PARAVERTRACE
Extrae_fini();
#endif
#ifdef XRAY
// write the captured trace, filtering out insignificant functions
XRaySaveReport(trace,
"syncbench.xray", // report file
0.05f, // Only output funcs that have higher runtime [%]
1000); // Only output funcs that have higher runtime [cycles]
XRayShutdown(trace);
#endif
finalise();
return EXIT_SUCCESS;
}
// Sequential reference kernel: innerreps plain delays with no OpenMP
// construct, used as the baseline the overhead benchmarks compare against.
void refer() {
    int rep = 0;
    while (rep < innerreps) {
        delay(delaylength);
        rep++;
    }
}
// Sequential reference kernel for the ATOMIC benchmark: repeatedly adds a
// slowly growing value into an accumulator without any synchronization.
// The b *= c update (c slightly above 1) keeps the compiler from folding
// the loop; the final never-true print keeps aaaa observably live.
void referatom(){
int j;
double aaaa = 0.0;
double epsilon = 1.0e-15;
double b, c;
b = 1.0;
c = (1.0 + epsilon);
for (j = 0; j < innerreps; j++) {
aaaa += b;
b *= c;
}
if (aaaa < 0.0)
printf("%f\n", aaaa);
}
// Sequential reference kernel for the REDUCTION benchmark: a delay plus a
// scalar accumulation per iteration, with no OpenMP construct.
void referred() {
    int total = 0;
    int rep = 0;
    while (rep < innerreps) {
        delay(delaylength);
        total += 1;
        rep++;
    }
}
// Measure the overhead of entering and leaving a PARALLEL region: one
// region is created and torn down per iteration.
void testpr() {
int j;
#ifdef XRAY
static int n = 1;
XRayAnnotate("n = %i", n);
n++;
#endif
for (j = 0; j < innerreps; j++) {
#pragma omp parallel
{
delay(delaylength);
}
}
}
// Measure the overhead of a FOR worksharing construct executed inside an
// already-existing parallel region (one `omp for` per inner iteration).
void testfor() {
int i, j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp for
for (i = 0; i < nthreads; i++) {
delay(delaylength);
}
}
}
}
/* Measures the overhead of a combined "parallel for": a fresh parallel
 * region plus worksharing loop is created on every one of the innerreps
 * iterations. Compare with testfor(), which reuses one region. */
void testpfor() {
int i, j;
#ifdef XRAY
/* XRay build: tag each invocation of this benchmark with a call counter */
static int n = 1;
XRayAnnotate("n = %i", n);
n++;
#endif
for (j = 0; j < innerreps; j++) {
#pragma omp parallel for
for (i = 0; i < nthreads; i++) {
delay(delaylength);
}
}
}
/* Measures barrier overhead: every thread does delay() then meets the
 * team at an explicit barrier, innerreps times. */
void testbar() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
delay(delaylength);
#pragma omp barrier
}
}
}
/* Measures single-construct overhead: one thread per iteration executes
 * delay(); the implicit barrier at the end of single is included in the
 * measured cost. */
void testsing() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps; j++) {
#pragma omp single
delay(delaylength);
}
}
}
/* Measures critical-section overhead. Each thread does innerreps/nthreads
 * iterations so the total number of critical entries across the team
 * matches the innerreps used by the serial reference. */
void testcrit() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps / nthreads; j++) {
#pragma omp critical
{
delay(delaylength);
}
}
}
}
/* Measures explicit lock/unlock overhead using the global omp lock
 * (presumably initialized elsewhere in the harness — the variable "lock"
 * is file/global scope, not declared here). Iteration count is divided
 * by nthreads as in testcrit(). */
void testlock() {
int j;
#pragma omp parallel private(j)
{
for (j = 0; j < innerreps / nthreads; j++) {
omp_set_lock(&lock);
delay(delaylength);
omp_unset_lock(&lock);
}
}
}
/* Measures ordered-section overhead: schedule(static,1) forces iterations
 * to round-robin across threads so every ordered entry must wait on its
 * predecessor. The (int) cast suggests innerreps is not declared int —
 * TODO confirm its type at the declaration site. */
void testorder() {
int j;
#pragma omp parallel for ordered schedule (static,1)
for (j = 0; j < (int)innerreps; j++) {
#pragma omp ordered
delay(delaylength);
}
}
/* Measures atomic-update overhead. aaaa is shared and protected by the
 * atomic construct; b is firstprivate so each thread advances its own
 * geometric series (b *= c is intentionally outside the atomic — it
 * touches only private state). The never-taken print keeps the work
 * from being optimized away. */
void testatom() {
int j;
double aaaa = 0.0;
double epsilon = 1.0e-15;
double b,c;
b = 1.0;
c = (1.0 + epsilon);
#pragma omp parallel private(j) firstprivate(b)
{
for (j = 0; j < innerreps / nthreads; j++) {
#pragma omp atomic
aaaa += b;
b *= c;
}
}
if (aaaa < 0.0)
printf("%f\n", aaaa);
}
/* Measures reduction overhead for a single scalar: each of the innerreps
 * outer iterations opens a parallel region with reduction(+:aaaa), every
 * thread contributing 1 after a delay(). */
void testred() {
int j;
int aaaa = 0;
for (j = 0; j < innerreps; j++) {
#pragma omp parallel reduction(+:aaaa)
{
delay(delaylength);
aaaa += 1;
}
}
}
|
pack_tril.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <string.h>
#include <complex.h>
#include "config.h"
#include "np_helper.h"
/* Complete the upper triangle of the dense n x n matrix mat from its
 * lower triangle. hermi == HERMITIAN or SYMMETRIC copies the transposed
 * element; any other non-zero hermi (anti-symmetric) copies the negation.
 * TRIU_LOOP comes from np_helper.h and presumably iterates (i, j) over
 * the strict upper triangle using the bounds j0/j1 declared here —
 * TODO confirm against np_helper.h. */
void NPdsymm_triu(int n, double *mat, int hermi)
{
size_t i, j, j0, j1;
if (hermi == HERMITIAN || hermi == SYMMETRIC) {
TRIU_LOOP(i, j) {
mat[i*n+j] = mat[j*n+i];
}
} else {
TRIU_LOOP(i, j) {
mat[i*n+j] = -mat[j*n+i];
}
}
}
/* Complex counterpart of NPdsymm_triu: complete the upper triangle of the
 * n x n complex matrix from its lower triangle. HERMITIAN conjugates,
 * SYMMETRIC copies, any other non-zero hermi (anti-hermitian) negates the
 * conjugate. TRIU_LOOP is the np_helper.h iteration macro using j0/j1. */
void NPzhermi_triu(int n, double complex *mat, int hermi)
{
size_t i, j, j0, j1;
if (hermi == HERMITIAN) {
TRIU_LOOP(i, j) {
mat[i*n+j] = conj(mat[j*n+i]);
}
} else if (hermi == SYMMETRIC) {
TRIU_LOOP(i, j) {
mat[i*n+j] = mat[j*n+i];
}
} else {
TRIU_LOOP(i, j) {
mat[i*n+j] = -conj(mat[j*n+i]);
}
}
}
void NPdunpack_tril(int n, double *tril, double *mat, int hermi)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
mat[i*n+j] = tril[ij];
}
}
if (hermi) {
NPdsymm_triu(n, mat, hermi);
}
}
// unpack one row from the compact matrix-tril coefficients
/* Extract row row_id of the symmetric matrix stored as a packed lower
 * triangle. The first row_id entries of that row are contiguous in tril;
 * the remaining entries (at and below the diagonal column-wise) are
 * reached by stepping one full packed row at a time. */
void NPdunpack_row(int ndim, int row_id, double *tril, double *row)
{
    int k;
    size_t offset = (size_t)row_id * (row_id + 1) / 2;

    /* elements (row_id, 0..row_id-1) are stored back to back */
    memcpy(row, tril + offset, sizeof(double) * row_id);
    /* elements (k, row_id) for k >= row_id: advance by the packed row length */
    for (k = row_id; k < ndim; k++) {
        offset += k;
        row[k] = tril[offset];
    }
}
void NPzunpack_tril(int n, double complex *tril, double complex *mat,
int hermi)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
mat[i*n+j] = tril[ij];
}
}
if (hermi) {
NPzhermi_triu(n, mat, hermi);
}
}
void NPdpack_tril(int n, double *tril, double *mat)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
tril[ij] = mat[i*n+j];
}
}
}
void NPzpack_tril(int n, double complex *tril, double complex *mat)
{
size_t i, j, ij;
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j <= i; j++, ij++) {
tril[ij] = mat[i*n+j];
}
}
}
/* out += in[idx[:,None],idy] */
/* Fancy-index gather: out[i,j] = in[idx[i], idy[j]], i.e.
 * out = in[idx[:,None], idy]. out has row stride odim, in has row stride
 * idim; nx rows and ny columns are gathered. Parallel over output rows. */
void NPdtake_2d(double *out, double *in, int *idx, int *idy,
                int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
        shared(out, in, idx, idy, odim, idim, nx, ny)
{
        size_t ix, iy;
        double *src_row;
#pragma omp for schedule (static)
        for (ix = 0; ix < nx; ix++) {
                src_row = in + (size_t)idim * idx[ix];
                for (iy = 0; iy < ny; iy++) {
                        out[ix*odim+iy] = src_row[idy[iy]];
                }
        }
}
}
/* Complex analogue of NPdtake_2d: out[i,j] = in[idx[i], idy[j]] for
 * i < nx, j < ny; odim/idim are the row strides of out/in. */
void NPztake_2d(double complex *out, double complex *in, int *idx, int *idy,
                int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
        shared(out, in, idx, idy, odim, idim, nx, ny)
{
        size_t ix, iy;
        double complex *src_row;
#pragma omp for schedule (static)
        for (ix = 0; ix < nx; ix++) {
                src_row = in + (size_t)idim * idx[ix];
                for (iy = 0; iy < ny; iy++) {
                        out[ix*odim+iy] = src_row[idy[iy]];
                }
        }
}
}
/* out[idx[:,None],idy] += in */
/* Fancy-index scatter-accumulate: out[idx[i], idy[j]] += in[i, j].
 * Parallel over the rows of in; safe as long as idx has no duplicate
 * entries (each thread then writes disjoint output rows). */
void NPdtakebak_2d(double *out, double *in, int *idx, int *idy,
                   int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
        shared(out, in, idx, idy, odim, idim, nx, ny)
{
        size_t ix, iy;
        double *dst_row;
#pragma omp for schedule (static)
        for (ix = 0; ix < nx; ix++) {
                dst_row = out + (size_t)odim * idx[ix];
                for (iy = 0; iy < ny; iy++) {
                        dst_row[idy[iy]] += in[ix*idim+iy];
                }
        }
}
}
/* Complex analogue of NPdtakebak_2d: out[idx[i], idy[j]] += in[i, j].
 * Parallel over input rows; assumes idx entries are distinct so output
 * rows do not race. */
void NPztakebak_2d(double complex *out, double complex *in, int *idx, int *idy,
                   int odim, int idim, int nx, int ny)
{
#pragma omp parallel default(none) \
        shared(out, in, idx, idy, odim, idim, nx, ny)
{
        size_t ix, iy;
        double complex *dst_row;
#pragma omp for schedule (static)
        for (ix = 0; ix < nx; ix++) {
                dst_row = out + (size_t)odim * idx[ix];
                for (iy = 0; iy < ny; iy++) {
                        dst_row[idy[iy]] += in[ix*idim+iy];
                }
        }
}
}
void NPdunpack_tril_2d(int count, int n, double *tril, double *mat, int hermi)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat, hermi)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPdunpack_tril(n, tril+n2*ic, mat+nn*ic, hermi);
}
}
}
void NPzunpack_tril_2d(int count, int n,
double complex *tril, double complex *mat, int hermi)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat, hermi)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPzunpack_tril(n, tril+n2*ic, mat+nn*ic, hermi);
}
}
}
void NPdpack_tril_2d(int count, int n, double *tril, double *mat)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPdpack_tril(n, tril+n2*ic, mat+nn*ic);
}
}
}
void NPzpack_tril_2d(int count, int n, double complex *tril, double complex *mat)
{
#pragma omp parallel default(none) \
shared(count, n, tril, mat)
{
int ic;
size_t nn = n * n;
size_t n2 = n*(n+1)/2;
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
NPzpack_tril(n, tril+n2*ic, mat+nn*ic);
}
}
}
|
omp_bug6.c | /******************************************************************************
* FILE: omp_bug6.c
* DESCRIPTION:
* Fails compilation in most cases.
* Compare to omp_orphan.c.
* AUTHOR: Blaise Barney 6/05
* LAST REVISED: 06/30/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100
float a[VECLEN], b[VECLEN];
/* Dot product of the global vectors a and b, spread across the team by an
 * orphaned omp-for. Fixes vs. original: sum is now initialized (it was
 * read uninitialized in the first reduction step — UB) and the non-void
 * function now returns its value (it had no return statement).
 * NOTE(review): this file (omp_bug6.c) deliberately demonstrates an
 * OpenMP bug — sum is a local of a function called inside a parallel
 * region, so reduction(+:sum) reduces into a per-call private copy and
 * each thread only receives its own partial sum. See omp_orphan.c for
 * the corrected pattern. */
float dotprod ()
{
int i, tid;
float sum = 0.0f;

tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
  for (i=0; i < VECLEN; i++) {
    sum = sum + (a[i]*b[i]);
    printf(" tid= %d i=%d\n",tid,i);
  }
return sum;
}
/* NOTE(review): omp_bug6.c is a deliberately broken teaching example.
 * Known defects visible here: main's local "sum" is never written by
 * dotprod() (dotprod accumulates a different variable), so the printed
 * "Sum" is always 0.0, and dotprod's return value is discarded. The
 * shared(sum) clause shares a variable nobody updates. Compare with the
 * fixed version in omp_orphan.c. */
int main (int argc, char *argv[]) {
int i;
float sum;
for (i=0; i < VECLEN; i++)
a[i] = b[i] = 1.0 * i;
sum = 0.0;
#pragma omp parallel shared(sum)
dotprod();
printf("Sum = %f\n",sum);
}
|
kij_optimize.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int A_row;
int A_col;
int B_row;
int B_col;
/* Allocate a row x col integer matrix as an array of row pointers.
 * Fixes vs. original: allocation failures are now detected (the old code
 * dereferenced a NULL result), and calloc is used so the matrix starts
 * zeroed — callers such as the kij multiply accumulate into it with +=
 * and previously read indeterminate malloc'ed memory. The caller frees
 * the matrix with freeMatrix(). */
int **constructMatrix(int row, int col){
    int **matrix = calloc(row, sizeof(int *));
    if (matrix == NULL) {
        fprintf(stderr, "constructMatrix: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < row; i++) {
        matrix[i] = calloc(col, sizeof(int));
        if (matrix[i] == NULL) {
            fprintf(stderr, "constructMatrix: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }
    return matrix;
}
/* Release a matrix allocated by constructMatrix: each of the row row
 * buffers, then the pointer array itself. The column count is not needed
 * for deallocation; the parameter is kept for interface symmetry. */
void freeMatrix(int **matrix, int row, int col){
    (void)col;
    for (int r = row; r-- > 0; ) {
        free(matrix[r]);
    }
    free(matrix);
}
/* Read A (A_row x A_col) and B (B_row x B_col) from the file "matrix",
 * compute C = A*B with a kij-ordered loop parallelized over k, time it,
 * and write C to "kij_optimize_result".
 * Fixes vs. original: argc is validated; fopen/fscanf results are
 * checked; and C is explicitly zeroed before the += accumulation — the
 * original read indeterminate malloc'ed memory, which is UB. */
int main(int argc, char *argv[]){
    if (argc < 6) {
        fprintf(stderr, "usage: %s A_row A_col B_row B_col num_threads\n", argv[0]);
        return EXIT_FAILURE;
    }
    A_row = atoi(*(argv + 1));
    A_col = atoi(*(argv + 2));
    B_row = atoi(*(argv + 3));
    B_col = atoi(*(argv + 4));
    int number_of_threads = atoi(*(argv + 5));
    FILE *input = fopen("matrix", "r");
    if (input == NULL) {
        perror("matrix");
        return EXIT_FAILURE;
    }
    int **A = constructMatrix(A_row, A_col);
    int **B = constructMatrix(B_row, B_col);
    int **C = constructMatrix(A_row, B_col);
    //read A
    for (int i = 0; i < A_row;i++){
        for (int j = 0; j < A_col;j++){
            if (fscanf(input, "%d", &A[i][j]) != 1) {
                fprintf(stderr, "malformed matrix file\n");
                return EXIT_FAILURE;
            }
        }
    }
    //read B
    for (int i = 0; i < B_row;i++){
        for (int j = 0; j < B_col;j++){
            if (fscanf(input, "%d", &B[i][j]) != 1) {
                fprintf(stderr, "malformed matrix file\n");
                return EXIT_FAILURE;
            }
        }
    }
    fclose(input);
    /* C is accumulated with +=, so it must start from zero regardless of
     * how constructMatrix obtained the memory */
    for (int i = 0; i < A_row;i++){
        for (int j = 0; j < B_col;j++){
            C[i][j] = 0;
        }
    }
    double start_time = omp_get_wtime();
    //multiply:
    int i, j, k;
    int temp;
#pragma omp parallel for shared(A,B,C) private(i,j,k,temp) num_threads(number_of_threads)
    for (k = 0; k < A_col;k++){
        for (i = 0; i < A_row;i++){
            temp = A[i][k];
            for (j = 0; j < B_col;j++){
                /* different k iterations (threads) update the same C[i][j] */
#pragma omp atomic
                C[i][j] += temp * B[k][j];
            }
        }
    }
    double end_time = omp_get_wtime();
    printf("%s: %g sec.\n", "kij_optimize_runtime", end_time - start_time);
    //output the result to compare with golden result
    FILE *out = fopen("kij_optimize_result", "w");
    if (out == NULL) {
        perror("kij_optimize_result");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < A_row;i++){
        for (int j = 0; j < B_col;j++){
            fprintf(out, "%d ", C[i][j]);
        }
        fprintf(out, "\n");
    }
    fprintf(out, "\n");
    fclose(out);
    freeMatrix(A, A_row, A_col);
    freeMatrix(B, B_row, B_col);
    freeMatrix(C, A_row, B_col);
    return 0;
}
fac_restrict2.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.19 $
***********************************************************************EHEADER*/
/******************************************************************************
* OpenMP Problems
*
* Are private static arrays a problem?
*
******************************************************************************/
/******************************************************************************
* FAC composite level restriction.
* Injection away from the refinement patches; constant restriction
* inside patch.
******************************************************************************/
#include "_hypre_sstruct_ls.h"
#include "fac.h"
#define MapCellRank(i, j , k, rank) \
{ \
rank = 4*k + 2*j + i; \
}
#define InverseMapCellRank(rank, stencil) \
{ \
HYPRE_Int ij,ii,jj,kk; \
ij = (rank%4); \
ii = (ij%2); \
jj = (ij-ii)/2; \
kk = (rank-2*jj-ii)/4; \
hypre_SetIndex(stencil, ii, jj, kk); \
}
/*--------------------------------------------------------------------------
* hypre_FacSemiRestrictData data structure
*--------------------------------------------------------------------------*/
typedef struct
{
HYPRE_Int nvars; /* number of variables in the part vectors */
hypre_Index stride; /* coarsening stride, copied from rfactors */
hypre_SStructPVector *fgrid_cvectors; /* the grid of this vector may not
be on the actual grid */
hypre_BoxArrayArray **identity_arrayboxes; /* per-var cboxes not under a fine patch */
hypre_BoxArrayArray **fullwgt_ownboxes; /* per-var coarsened fboxes kept locally */
hypre_BoxArrayArray **fullwgt_sendboxes; /* per-var coarsened fboxes sent away */
HYPRE_Int ***own_cboxnums; /* local crs boxnums of ownboxes */
hypre_CommPkg **interlevel_comm; /* per-var fine->coarse communication pkg */
/* hypre_CommPkg **intralevel_comm;*/ /* may need to build an intra comm so
that each processor only fullwts its
own fine data- may need to add contrib */
} hypre_FacSemiRestrictData2;
/*--------------------------------------------------------------------------
* hypre_FacSemiRestrictCreate
*--------------------------------------------------------------------------*/
/* Allocate a zero-initialized hypre_FacSemiRestrictData2 and return it
 * through fac_restrict_vdata_ptr; the fields are populated later by
 * hypre_FacSemiRestrictSetup2. Always returns 0. */
HYPRE_Int
hypre_FacSemiRestrictCreate2( void **fac_restrict_vdata_ptr)
{
HYPRE_Int ierr = 0;
hypre_FacSemiRestrictData2 *fac_restrict_data;
fac_restrict_data = hypre_CTAlloc(hypre_FacSemiRestrictData2, 1);
*fac_restrict_vdata_ptr = (void *) fac_restrict_data;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_FacSemiRestrictSetup:
* Two types of communication are needed- one for the interlevel coarsened
* fine boxes, and the other for the ghostlayer of the restricted vector.
*
* Approach: Identity away from the patches & fullweighting in a patch.
* Since a fbox may not have the desired mapping
* fbox= [a_0, a_1, a_2]x [b_0, b_1, b_2], a_i= c_i*rfactor[i]
* b_i= f_i*rfactor[i] + g_i
* with g_i= (rfactor[i]-1), attention must be paid to what the own_boxes,
* send_boxes, and recv_boxes are. These map overlap. The reason:
* myproc fullwgts what it can or equivalently, gets the restriction
* contributions of its data. Some off_procs can compute the remaining
* part of the agglomerate belonging to myproc and communicate it to myproc.
* Hence, myproc's own_boxes contains these nodes as well as myproc's
* recv_boxes.
*--------------------------------------------------------------------------*/
/* Build all box lists and communication packages needed by
 * hypre_FACRestrict2: identity (injection) boxes away from the fine
 * patches, own/send boxes for the full-weighted coarsened fine data, and
 * one interlevel CommPkg per variable. See the block comment above for
 * the overall approach. */
HYPRE_Int
hypre_FacSemiRestrictSetup2( void *fac_restrict_vdata,
hypre_SStructVector *r,
HYPRE_Int part_crse,
HYPRE_Int part_fine,
hypre_SStructPVector *rc,
hypre_Index rfactors )
{
HYPRE_Int ierr = 0;
hypre_FacSemiRestrictData2 *fac_restrict_data = fac_restrict_vdata;
MPI_Comm comm= hypre_SStructPVectorComm(rc);
hypre_CommInfo *comm_info;
hypre_CommPkg **interlevel_comm;
hypre_SStructPVector *rf= hypre_SStructVectorPVector(r, part_fine);
hypre_StructVector *s_rc, *s_cvector;
hypre_SStructPGrid *pgrid;
hypre_SStructPVector *fgrid_cvectors;
hypre_SStructPGrid *fgrid_coarsen;
hypre_BoxArrayArray **identity_arrayboxes;
hypre_BoxArrayArray **fullwgt_ownboxes;
hypre_BoxArrayArray **fullwgt_sendboxes;
hypre_BoxArray *boxarray;
hypre_BoxArray *tmp_boxarray, *intersect_boxes;
HYPRE_Int ***own_cboxnums;
hypre_BoxArrayArray **send_boxes, *send_rboxes;
HYPRE_Int ***send_processes;
HYPRE_Int ***send_remote_boxnums;
hypre_BoxArrayArray **recv_boxes, *recv_rboxes;
HYPRE_Int ***recv_processes;
HYPRE_Int ***recv_remote_boxnums;
hypre_BoxManager *boxman;
hypre_BoxManEntry **boxman_entries;
HYPRE_Int nboxman_entries;
hypre_Box box, scaled_box;
hypre_Index zero_index, index, ilower, iupper;
HYPRE_Int ndim= hypre_SStructVectorNDim(r);
HYPRE_Int myproc, proc;
HYPRE_Int nvars, vars;
HYPRE_Int num_values;
HYPRE_Int i, cnt1, cnt2;
HYPRE_Int fi, ci;
hypre_MPI_Comm_rank(comm, &myproc);
hypre_ClearIndex(zero_index);
nvars= hypre_SStructPVectorNVars(rc);
(fac_restrict_data -> nvars)= nvars;
hypre_CopyIndex(rfactors, (fac_restrict_data -> stride));
/* pad unused dimensions with unit refinement factors */
for (i= ndim; i< 3; i++)
{
rfactors[i]= 1;
}
/* work vector for storing the fullweighted fgrid boxes */
hypre_SStructPGridCreate(hypre_SStructPVectorComm(rf), ndim, &fgrid_coarsen);
pgrid= hypre_SStructPVectorPGrid(rf);
for (vars= 0; vars< nvars; vars++)
{
boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));
hypre_ForBoxI(fi, boxarray)
{
hypre_CopyBox(hypre_BoxArrayBox(boxarray, fi), &box);
hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index,
rfactors, hypre_BoxIMin(&box));
hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index,
rfactors, hypre_BoxIMax(&box));
hypre_SStructPGridSetExtents(fgrid_coarsen,
hypre_BoxIMin(&box),
hypre_BoxIMax(&box));
}
}
hypre_SStructPGridSetVariables( fgrid_coarsen, nvars,
hypre_SStructPGridVarTypes(pgrid) );
hypre_SStructPGridAssemble(fgrid_coarsen);
hypre_SStructPVectorCreate(hypre_SStructPGridComm(fgrid_coarsen), fgrid_coarsen,
&fgrid_cvectors);
hypre_SStructPVectorInitialize(fgrid_cvectors);
hypre_SStructPVectorAssemble(fgrid_cvectors);
/* pgrid fgrid_coarsen no longer needed */
hypre_SStructPGridDestroy(fgrid_coarsen);
fac_restrict_data -> fgrid_cvectors= fgrid_cvectors;
/*--------------------------------------------------------------------------
* boxes that are not underlying a fine box:
*
* algorithm: subtract all coarsened fine grid boxes that intersect with
* this processor's coarse boxes. Note that we cannot loop over all the
* coarsened fine boxes and subtract them from the coarse grid since we do
* not know if some of the overlying fine boxes belong on another
* processor. For each cbox, we get a boxarray of boxes that are not
* underlying-> size(identity_arrayboxes[vars])= #cboxes.
*
* Note that no contraction is needed for the intersect boxes since they
* will be subtracted from the cbox. Contraction can erroneously lead
* to bigger identity boxes.
*--------------------------------------------------------------------------*/
identity_arrayboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
pgrid= hypre_SStructPVectorPGrid(rc);
hypre_ClearIndex(index);
/* index = rfactors-1: offset to the last fine cell of a coarse cell */
for (i= 0; i< ndim; i++)
{
index[i]= rfactors[i]-1;
}
tmp_boxarray = hypre_BoxArrayCreate(0);
for (vars= 0; vars< nvars; vars++)
{
boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r),
part_fine, vars);
boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));
identity_arrayboxes[vars]= hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
hypre_ForBoxI(ci, boxarray)
{
hypre_CopyBox(hypre_BoxArrayBox(boxarray, ci), &box);
hypre_AppendBox(&box,
hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci));
hypre_StructMapCoarseToFine(hypre_BoxIMin(&box), zero_index,
rfactors, hypre_BoxIMin(&scaled_box));
hypre_StructMapCoarseToFine(hypre_BoxIMax(&box), index,
rfactors, hypre_BoxIMax(&scaled_box));
hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box),
hypre_BoxIMax(&scaled_box), &boxman_entries,
&nboxman_entries);
/* all send and coarsened fboxes on this processor are collected */
intersect_boxes= hypre_BoxArrayCreate(0);
for (i= 0; i< nboxman_entries; i++)
{
hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper);
hypre_BoxSetExtents(&box, ilower, iupper);
hypre_IntersectBoxes(&box, &scaled_box, &box);
hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index,
rfactors, hypre_BoxIMin(&box));
hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index,
rfactors, hypre_BoxIMax(&box));
hypre_AppendBox(&box, intersect_boxes);
}
hypre_SubtractBoxArrays(hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci),
intersect_boxes, tmp_boxarray);
hypre_MinUnionBoxes(hypre_BoxArrayArrayBoxArray(identity_arrayboxes[vars], ci));
hypre_TFree(boxman_entries);
hypre_BoxArrayDestroy(intersect_boxes);
}
}
hypre_BoxArrayDestroy(tmp_boxarray);
fac_restrict_data -> identity_arrayboxes= identity_arrayboxes;
/*--------------------------------------------------------------------------
* fboxes that are coarsened. Some will be sent. We create the communication
* pattern. For each fbox, we need a boxarray of sendboxes or ownboxes.
*
* Algorithm: Coarsen each fbox and see which cboxes it intersects using
* BoxManIntersect. Cboxes that do not belong on the processor will have
* a chunk sent to it.
*
* Note that no contraction is needed. Contraction can lead to erroneous
* send_boxes.
*--------------------------------------------------------------------------*/
interlevel_comm= hypre_CTAlloc(hypre_CommPkg *, nvars);
fullwgt_sendboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
fullwgt_ownboxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
own_cboxnums= hypre_CTAlloc(HYPRE_Int **, nvars);
send_boxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
send_processes= hypre_CTAlloc(HYPRE_Int **, nvars);
send_remote_boxnums= hypre_CTAlloc(HYPRE_Int **, nvars);
pgrid= hypre_SStructPVectorPGrid(rf);
for (vars= 0; vars< nvars; vars++)
{
boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r),
part_crse, vars);
boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));
fullwgt_sendboxes[vars]= hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
fullwgt_ownboxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
own_cboxnums[vars] = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
send_boxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
send_processes[vars] = hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
send_remote_boxnums[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
hypre_ForBoxI(fi, boxarray)
{
hypre_CopyBox(hypre_BoxArrayBox(boxarray, fi), &box);
hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index,
rfactors, hypre_BoxIMin(&scaled_box));
hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index,
rfactors, hypre_BoxIMax(&scaled_box));
hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box),
hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries);
/* first pass: count remote (cnt1) and local (cnt2) intersections */
cnt1= 0; cnt2= 0;
for (i= 0; i< nboxman_entries; i++)
{
hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
if (proc != myproc)
{
cnt1++;
}
else
{
cnt2++;
}
}
send_processes[vars][fi] = hypre_CTAlloc(HYPRE_Int, cnt1);
send_remote_boxnums[vars][fi]= hypre_CTAlloc(HYPRE_Int, cnt1);
own_cboxnums[vars][fi] = hypre_CTAlloc(HYPRE_Int, cnt2);
/* second pass: record the send/own boxes and their boxnums */
cnt1= 0; cnt2= 0;
for (i= 0; i< nboxman_entries; i++)
{
hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper);
hypre_BoxSetExtents(&box, ilower, iupper);
hypre_IntersectBoxes(&box, &scaled_box, &box);
hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
if (proc != myproc)
{
hypre_AppendBox(&box,
hypre_BoxArrayArrayBoxArray(fullwgt_sendboxes[vars], fi));
hypre_AppendBox(&box,
hypre_BoxArrayArrayBoxArray(send_boxes[vars], fi));
send_processes[vars][fi][cnt1]= proc;
hypre_SStructBoxManEntryGetBoxnum(boxman_entries[i],
&send_remote_boxnums[vars][fi][cnt1]);
cnt1++;
}
else
{
hypre_AppendBox(&box,
hypre_BoxArrayArrayBoxArray(fullwgt_ownboxes[vars], fi));
hypre_SStructBoxManEntryGetBoxnum(boxman_entries[i],
&own_cboxnums[vars][fi][cnt2]);
cnt2++;
}
}
hypre_TFree(boxman_entries);
} /* hypre_ForBoxI(fi, boxarray) */
} /* for (vars= 0; vars< nvars; vars++) */
(fac_restrict_data -> fullwgt_sendboxes)= fullwgt_sendboxes;
(fac_restrict_data -> fullwgt_ownboxes)= fullwgt_ownboxes;
(fac_restrict_data -> own_cboxnums)= own_cboxnums;
/*--------------------------------------------------------------------------
* coarsened fboxes this processor will receive.
*
* Algorithm: For each cbox on this processor, refine it and find which
* processors the refinement belongs in. The processors owning a chunk
* are the recv_processors.
*--------------------------------------------------------------------------*/
recv_boxes= hypre_CTAlloc(hypre_BoxArrayArray *, nvars);
recv_processes= hypre_CTAlloc(HYPRE_Int **, nvars);
/* dummy pointer for CommInfoCreate */
recv_remote_boxnums= hypre_CTAlloc(HYPRE_Int **, nvars);
pgrid= hypre_SStructPVectorPGrid(rc);
for (vars= 0; vars< nvars; vars++)
{
boxman= hypre_SStructGridBoxManager(hypre_SStructVectorGrid(r),
part_fine, vars);
boxarray= hypre_StructGridBoxes(hypre_SStructPGridSGrid(pgrid, vars));
recv_boxes[vars] = hypre_BoxArrayArrayCreate(hypre_BoxArraySize(boxarray));
recv_processes[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
recv_remote_boxnums[vars]= hypre_CTAlloc(HYPRE_Int *, hypre_BoxArraySize(boxarray));
hypre_ForBoxI(ci, boxarray)
{
hypre_CopyBox(hypre_BoxArrayBox(boxarray, ci), &box);
hypre_StructMapCoarseToFine(hypre_BoxIMin(&box), zero_index,
rfactors, hypre_BoxIMin(&scaled_box));
hypre_StructMapCoarseToFine(hypre_BoxIMax(&box), index,
rfactors, hypre_BoxIMax(&scaled_box));
hypre_BoxManIntersect(boxman, hypre_BoxIMin(&scaled_box),
hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries);
/* first pass: count off-processor chunks */
cnt1= 0;
for (i= 0; i< nboxman_entries; i++)
{
hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
if (proc != myproc)
{
cnt1++;
}
}
recv_processes[vars][ci]= hypre_CTAlloc(HYPRE_Int, cnt1);
recv_remote_boxnums[vars][ci]= hypre_CTAlloc(HYPRE_Int , cnt1);
/* second pass: record the coarsened recv boxes and owner procs */
cnt1= 0;
for (i= 0; i< nboxman_entries; i++)
{
hypre_SStructBoxManEntryGetProcess(boxman_entries[i], &proc);
if (proc != myproc)
{
hypre_BoxManEntryGetExtents(boxman_entries[i], ilower, iupper);
hypre_BoxSetExtents(&box, ilower, iupper);
hypre_IntersectBoxes(&box, &scaled_box, &box);
/* no contracting needed */
hypre_StructMapFineToCoarse(hypre_BoxIMin(&box), zero_index,
rfactors, hypre_BoxIMin(&box));
hypre_StructMapFineToCoarse(hypre_BoxIMax(&box), zero_index,
rfactors, hypre_BoxIMax(&box));
hypre_AppendBox(&box,
hypre_BoxArrayArrayBoxArray(recv_boxes[vars], ci));
recv_processes[vars][ci][cnt1]= proc;
cnt1++;
} /* if (proc != myproc) */
} /* for (i= 0; i< nmap_entries; i++) */
hypre_TFree(boxman_entries);
} /* hypre_ForBoxI(ci, boxarray) */
} /* for (vars= 0; vars< nvars; vars++) */
/* assemble one interlevel communication package per variable */
num_values= 1;
for (vars= 0; vars< nvars; vars++)
{
s_rc = hypre_SStructPVectorSVector(rc, vars);
s_cvector= hypre_SStructPVectorSVector(fgrid_cvectors, vars);
send_rboxes= hypre_BoxArrayArrayDuplicate(send_boxes[vars]);
recv_rboxes= hypre_BoxArrayArrayDuplicate(recv_boxes[vars]);
hypre_CommInfoCreate(send_boxes[vars], recv_boxes[vars],
send_processes[vars], recv_processes[vars],
send_remote_boxnums[vars], recv_remote_boxnums[vars],
send_rboxes, recv_rboxes, 1, &comm_info);
hypre_CommPkgCreate(comm_info,
hypre_StructVectorDataSpace(s_cvector),
hypre_StructVectorDataSpace(s_rc),
num_values, NULL, 0,
hypre_StructVectorComm(s_rc),
&interlevel_comm[vars]);
hypre_CommInfoDestroy(comm_info);
}
hypre_TFree(send_boxes);
hypre_TFree(recv_boxes);
hypre_TFree(send_processes);
hypre_TFree(recv_processes);
hypre_TFree(send_remote_boxnums);
hypre_TFree(recv_remote_boxnums);
(fac_restrict_data -> interlevel_comm)= interlevel_comm;
return ierr;
}
HYPRE_Int
hypre_FACRestrict2( void * fac_restrict_vdata,
hypre_SStructVector * xf,
hypre_SStructPVector * xc)
{
HYPRE_Int ierr = 0;
hypre_FacSemiRestrictData2 *restrict_data = fac_restrict_vdata;
hypre_SStructPVector *fgrid_cvectors = restrict_data->fgrid_cvectors;
hypre_BoxArrayArray **identity_arrayboxes= restrict_data->identity_arrayboxes;
hypre_BoxArrayArray **fullwgt_ownboxes = restrict_data->fullwgt_ownboxes;
HYPRE_Int ***own_cboxnums = restrict_data->own_cboxnums;
hypre_CommPkg **interlevel_comm= restrict_data-> interlevel_comm;
hypre_CommHandle *comm_handle;
HYPRE_Int ndim = hypre_SStructVectorNDim(xf);
hypre_BoxArrayArray *arrayarray_ownboxes;
hypre_IndexRef stride; /* refinement factors */
hypre_StructGrid *fgrid;
hypre_BoxArray *fgrid_boxes;
hypre_Box *fgrid_box;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
hypre_BoxArray *own_boxes;
hypre_Box *own_box;
HYPRE_Int *boxnums;
hypre_Box *xc_temp_dbox;
hypre_Box *xf_dbox;
hypre_StructVector *xc_temp;
hypre_StructVector *xc_var;
hypre_StructVector *xf_var;
HYPRE_Int xci;
HYPRE_Int xfi;
double ***xfp;
double ***xcp;
double ***xcp_temp;
hypre_Index loop_size, lindex;
hypre_Index start, fbox_size, node_offset;
hypre_Index startc;
hypre_Index stridec;
hypre_Index rfactors;
hypre_Index temp_index1, temp_index2;
HYPRE_Int fi, ci;
HYPRE_Int nvars, var;
HYPRE_Int volume_crse_cell;
HYPRE_Int i, j, k;
HYPRE_Int imax, jmax, kmax;
HYPRE_Int icell, jcell, kcell, ijkcell;
double *sum;
double scaling;
HYPRE_Int part_crse= 0;
HYPRE_Int part_fine= 1;
HYPRE_Int num_coarse_cells;
/*-----------------------------------------------------------------------
* Initialize some things
*-----------------------------------------------------------------------*/
stride= (restrict_data -> stride);
hypre_ClearIndex(stridec);
for (i= 0; i< ndim; i++)
{
stridec[i]= 1;
}
hypre_CopyIndex(stride, rfactors);
for (i= ndim; i< 3; i++)
{
rfactors[i]= 1;
}
volume_crse_cell= 1;
for (i= 0; i< ndim; i++)
{
volume_crse_cell*= rfactors[i];
}
/*-----------------------------------------------------------------------
* We are assuming the refinement and coarsening have same variable
* types.
*-----------------------------------------------------------------------*/
nvars= hypre_SStructPVectorNVars(xc);
/*-----------------------------------------------------------------------
* For each coordinate direction, a fine node can contribute only to the
* left or right cell=> only 2 coarse cells per direction.
*-----------------------------------------------------------------------*/
num_coarse_cells= 1;
for (i= 0; i< ndim; i++)
{
num_coarse_cells*= 2;
}
sum= hypre_CTAlloc(double, num_coarse_cells);
/*--------------------------------------------------------------------------
* Scaling for averaging restriction.
*--------------------------------------------------------------------------*/
scaling= 1.0;
for (i= 0; i< ndim-2; i++)
{
scaling*= rfactors[0];
}
/*-----------------------------------------------------------------------
* Initialize the coarse vector to zero.
*-----------------------------------------------------------------------*/
hypre_SStructPVectorSetConstantValues(xc, 0.0);
/*-----------------------------------------------------------------------
* Copy the coarse data: xf[part_crse] -> xc
*-----------------------------------------------------------------------*/
hypre_SStructPartialPCopy(hypre_SStructVectorPVector(xf, part_crse),
xc, identity_arrayboxes);
/*-----------------------------------------------------------------------
* Piecewise constant restriction over the refinement patch.
*
* Initialize the work vector by setting to zero.
*-----------------------------------------------------------------------*/
hypre_SStructPVectorSetConstantValues(fgrid_cvectors, 0.0);
/*-----------------------------------------------------------------------
* Allocate memory for the data pointers. Assuming constant restriction.
* We stride through the refinement patch by the refinement factors, and
* so we must have pointers to the intermediate fine nodes=> xfp will
* be size rfactors[2]*rfactors[1]. Because the fbox may not have the
* ideal refinement form, we need to contribute to 2^ndim cells.
*-----------------------------------------------------------------------*/
if (ndim > 1)
{
xcp_temp= hypre_TAlloc(double **, (ndim-1));
xcp = hypre_TAlloc(double **, (ndim-1));
for (k= 0; k< (ndim-1); k++)
{
xcp_temp[k]= hypre_TAlloc(double *, 2);
xcp[k] = hypre_TAlloc(double *, 2);
}
}
else /* 1d does not really require these double ptrs */
{
xcp_temp = hypre_TAlloc(double **, 1);
xcp = hypre_TAlloc(double **, 1);
xcp_temp[0]= hypre_TAlloc(double *, 1);
xcp[0] = hypre_TAlloc(double *, 1);
}
/* memory allocation of xfp is okay for all dimensions */
xfp= hypre_TAlloc(double **, rfactors[2]);
for (k= 0; k< rfactors[2]; k++)
{
xfp[k]= hypre_TAlloc(double *, rfactors[1]);
}
for (var= 0; var< nvars; var++)
{
xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var);
xf_var= hypre_SStructPVectorSVector(hypre_SStructVectorPVector(xf,part_fine),
var);
fgrid = hypre_StructVectorGrid(xf_var);
fgrid_boxes = hypre_StructGridBoxes(fgrid);
cgrid = hypre_StructVectorGrid(xc_temp);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
hypre_ForBoxI(fi, fgrid_boxes)
{
fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi);
/*--------------------------------------------------------------------
* Get the ptrs for the fine struct_vectors.
*--------------------------------------------------------------------*/
xf_dbox = hypre_BoxArrayBox(hypre_StructVectorDataSpace(xf_var), fi);
for (k= 0; k< rfactors[2]; k++)
{
for (j=0; j< rfactors[1]; j++)
{
hypre_SetIndex(temp_index1, 0, j, k);
xfp[k][j]= hypre_StructVectorBoxData(xf_var, fi) +
hypre_BoxOffsetDistance(xf_dbox, temp_index1);
}
}
/*--------------------------------------------------------------------
* Get the ptrs for the coarse struct_vectors. Note that the coarse
* work vector is indexed with respect to the local fine box no.'s.
* Work vectors were created this way.
* Dimensionally dependent.
*--------------------------------------------------------------------*/
xc_temp_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_temp), fi);
if (ndim > 1)
{
for (k= 0; k< (ndim-1); k++)
{
for (j=0; j< 2; j++)
{
hypre_SetIndex(temp_index1, 0, j, k);
xcp_temp[k][j]= hypre_StructVectorBoxData(xc_temp, fi) +
hypre_BoxOffsetDistance(xc_temp_dbox, temp_index1);
}
}
}
else /* 1d case */
{
hypre_ClearIndex(temp_index1);
xcp_temp[0][0]= hypre_StructVectorBoxData(xc_temp, fi) +
hypre_BoxOffsetDistance(xc_temp_dbox, temp_index1);
}
hypre_CopyIndex(hypre_BoxIMin(fgrid_box), start);
hypre_CopyIndex(hypre_BoxIMax(fgrid_box), fbox_size);
/*--------------------------------------------------------------------
* Adjust "fbox_size" so that this hypre_Index is appropriate for
* ndim < 3.
* fbox_size= hypre_BoxIMax(fgrid_box)-hypre_BoxIMin(fgrid_box)+1.
*--------------------------------------------------------------------*/
for (i= 0; i< 3; i++)
{
fbox_size[i]-= (start[i]-1);
}
/*--------------------------------------------------------------------
* The fine intersection box may not be divisible by the refinement
* factor. We need to know the remainder to determine which
* coarse node gets the restricted values.
*--------------------------------------------------------------------*/
hypre_ClearIndex(node_offset);
for (i= 0; i< ndim; i++)
{
node_offset[i]= rfactors[i]-(start[i]%rfactors[i])-1;
}
hypre_SetIndex(temp_index2, 0, 0, 0);
hypre_StructMapFineToCoarse(start, temp_index2, rfactors, startc);
hypre_BoxGetSize(fgrid_box, temp_index1);
hypre_StructMapFineToCoarse(temp_index1, temp_index2, rfactors, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
xf_dbox, start, stride, xfi,
xc_temp_dbox, startc, stridec, xci);
#if 0 /* Are private static arrays a problem? */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xfi,xci,imax,jmax,kmax,k,kcell,j,jcell,i,icell,ijkcell,temp_index2) HYPRE_SMP_SCHEDULE
#endif
#else
hypre_BoxLoopSetOneBlock();
#endif
hypre_BoxLoop2For(xfi, xci)
{
/*-----------------------------------------------------------------
* Arithmetic average the refinement patch values to get
* restricted coarse grid values in an agglomerate; i.e.,
* piecewise constant restriction.
*-----------------------------------------------------------------*/
hypre_BoxLoopGetIndex(lindex);
imax= hypre_min( (fbox_size[0]-lindex[0]*stride[0]), rfactors[0] );
jmax= hypre_min( (fbox_size[1]-lindex[1]*stride[1]), rfactors[1] );
kmax= hypre_min( (fbox_size[2]-lindex[2]*stride[2]), rfactors[2] );
for (i= 0; i< num_coarse_cells; i++)
{
sum[i]= 0.0;
}
for (k= 0; k< kmax; k++)
{
kcell= 1;
if (k <= node_offset[2])
{
kcell= 0;
}
for (j= 0; j< jmax; j++)
{
jcell= 1;
if (j <= node_offset[1])
{
jcell= 0;
}
for (i= 0; i< imax; i++)
{
icell= 1;
if (i <= node_offset[0])
{
icell= 0;
}
MapCellRank(icell, jcell , kcell, ijkcell);
sum[ijkcell]+= xfp[k][j][xfi+i];
}
}
}
/*-----------------------------------------------------------------
* Add the compute averages to the correct coarse cell.
*-----------------------------------------------------------------*/
for (ijkcell= 0; ijkcell< num_coarse_cells; ijkcell++)
{
if (sum[ijkcell] != 0.0)
{
sum[ijkcell]/= scaling;
InverseMapCellRank(ijkcell, temp_index2);
i= temp_index2[0];
j= temp_index2[1];
k= temp_index2[2];
xcp_temp[k][j][xci+i]+= sum[ijkcell];
}
}
}
hypre_BoxLoop2End(xfi, xci);
} /* hypre_ForBoxI(fi, fgrid_boxes) */
} /* for (var= 0; var< nvars; var++)*/
/*------------------------------------------------------------------
* Communicate calculated restricted function over the coarsened
* patch. Only actual communicated values will be put in the
* coarse vector.
*------------------------------------------------------------------*/
for (var= 0; var< nvars; var++)
{
xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var);
xc_var= hypre_SStructPVectorSVector(xc, var);
hypre_InitializeCommunication(interlevel_comm[var],
hypre_StructVectorData(xc_temp),
hypre_StructVectorData(xc_var), 0, 0,
&comm_handle);
hypre_FinalizeCommunication(comm_handle);
}
/*------------------------------------------------------------------
* Need to add the coarsened patches that belong on this processor
* to the coarse vector.
*------------------------------------------------------------------*/
for (var= 0; var< nvars; var++)
{
xc_temp= hypre_SStructPVectorSVector(fgrid_cvectors, var);
xc_var= hypre_SStructPVectorSVector(xc, var);
cgrid = hypre_StructVectorGrid(xc_temp);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
arrayarray_ownboxes= fullwgt_ownboxes[var];
hypre_ForBoxI(ci, cgrid_boxes)
{
cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci);
xc_temp_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_temp), ci);
xcp_temp[0][0]= hypre_StructVectorBoxData(xc_temp, ci);
/*--------------------------------------------------------------
* Each ci box of cgrid_box has a boxarray of subboxes. Copy
* each of these subboxes to the coarse vector.
*--------------------------------------------------------------*/
own_boxes= hypre_BoxArrayArrayBoxArray(arrayarray_ownboxes, ci);
boxnums = own_cboxnums[var][ci];
hypre_ForBoxI(i, own_boxes)
{
own_box= hypre_BoxArrayBox(own_boxes, i);
xf_dbox= hypre_BoxArrayBox(hypre_StructVectorDataSpace(xc_var), boxnums[i]);
xcp[0][0]= hypre_StructVectorBoxData(xc_var, boxnums[i]);
hypre_BoxGetSize(own_box, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
xc_temp_dbox, hypre_BoxIMin(own_box), stridec, xfi,
xf_dbox, hypre_BoxIMin(own_box), stridec, xci);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xfi,xci) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(xfi, xci)
{
xcp[0][0][xci]+= xcp_temp[0][0][xfi];
}
hypre_BoxLoop2End(xfi, xci);
} /* hypre_ForBoxI(i, own_boxes) */
} /* hypre_ForBoxI(ci, cgrid_boxes) */
} /* for (var= 0; var< nvars; var++) */
hypre_TFree(sum);
for (k= 0; k< rfactors[2]; k++)
{
hypre_TFree(xfp[k]);
}
hypre_TFree(xfp);
if (ndim > 1)
{
for (k= 0; k< (ndim-1); k++)
{
hypre_TFree(xcp_temp[k]);
hypre_TFree(xcp[k]);
}
}
else
{
hypre_TFree(xcp_temp[0]);
hypre_TFree(xcp[0]);
}
hypre_TFree(xcp_temp);
hypre_TFree(xcp);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_FacSemiRestrictDestroy
*--------------------------------------------------------------------------*/
/* Free all data owned by a hypre_FacSemiRestrictData2 object
 * (presumably created by the matching Setup routine, not visible here).
 * Returns 0 on success; a NULL argument is a no-op. */
HYPRE_Int
hypre_FacSemiRestrictDestroy2( void *fac_restrict_vdata )
{
HYPRE_Int ierr = 0;
hypre_FacSemiRestrictData2 *fac_restrict_data = fac_restrict_vdata;
HYPRE_Int nvars;
HYPRE_Int i, j;
if (fac_restrict_data)
{
nvars= (fac_restrict_data-> nvars);
/* destroy the temporary coarsened-patch vector */
hypre_SStructPVectorDestroy(fac_restrict_data-> fgrid_cvectors);
/* per-variable data: box-array-arrays, box-number arrays, comm packages */
for (i= 0; i< nvars; i++)
{
hypre_BoxArrayArrayDestroy((fac_restrict_data -> identity_arrayboxes)[i]);
hypre_BoxArrayArrayDestroy((fac_restrict_data -> fullwgt_sendboxes)[i]);
/* one boxnum array per ci box of fullwgt_ownboxes[i] */
for (j= 0; j< hypre_BoxArrayArraySize(fac_restrict_data->fullwgt_ownboxes[i]); j++)
{
hypre_TFree((fac_restrict_data -> own_cboxnums)[i][j]);
}
hypre_TFree((fac_restrict_data -> own_cboxnums)[i]);
hypre_BoxArrayArrayDestroy((fac_restrict_data -> fullwgt_ownboxes)[i]);
hypre_CommPkgDestroy((fac_restrict_data -> interlevel_comm)[i]);
}
/* free the per-variable pointer arrays themselves, then the struct */
hypre_TFree(fac_restrict_data -> identity_arrayboxes);
hypre_TFree(fac_restrict_data -> fullwgt_sendboxes);
hypre_TFree(fac_restrict_data -> own_cboxnums);
hypre_TFree(fac_restrict_data -> fullwgt_ownboxes);
hypre_TFree(fac_restrict_data -> interlevel_comm);
hypre_TFree(fac_restrict_data);
}
return ierr;
}
|
GB_binop__div_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint16)
// A*D function (colscale): GB (_AxD__div_uint16)
// D*A function (rowscale): GB (_DxB__div_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint16)
// C=scalar+B GB (_bind1st__div_uint16)
// C=scalar+B' GB (_bind1st_tran__div_uint16)
// C=A+scalar GB (_bind2nd__div_uint16)
// C=A'+scalar GB (_bind2nd_tran__div_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  The numeric loop lives in the
// included template, specialized by the GB_* macros defined earlier in this
// file (unsigned 16-bit division).  Note: unlike the other workers here, this
// one returns void and has no GB_DISABLE guard.
void GB (_Cdense_ewise3_accum__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation into C).
// Returns GrB_NO_VALUE when this operator/type is compiled out (GB_DISABLE),
// signalling the caller to use a generic method instead.
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B_ek_slicing / B_ntasks / B_nthreads describe the parallel partition of B.
GrB_Info GB (_Cdense_accumB__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
// p_bwork points to the scalar, reinterpreted as uint16_t below.
// Returns GrB_NO_VALUE when this operator/type is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__div_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // single exit point, matching _Cdense_accumB above; the previous version
    // also returned inside the scalar block, leaving this statement dead
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// The *_is_pattern flags indicate that only the structure of that input is
// used by the template, not its values.
GrB_Info GB (_AxD__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Mirror image of _AxD above, using the rowscale template.
GrB_Info GB (_DxB__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, with an optional (complemented)
// mask M.  The C_to_* arrays map vectors of C to vectors of M/A/B, and
// TaskList/C_ntasks/C_nthreads describe the parallel schedule.
GrB_Info GB (_AaddB__div_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces used by the template; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.  See _AaddB above for the meaning of the common
// mask/mapping/task parameters.
GrB_Info GB (_AemultB_08__div_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy asks for f(y,x) instead of f(x,y); for DIV this was
// already rewritten as RDIV by the caller, so GB_BINOP_FLIP is 0 here and
// only the unflipped branch is compiled.
GrB_Info GB (_AemultB_02__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full.  M_ek_slicing/M_ntasks/M_nthreads partition the mask.
GrB_Info GB (_AemultB_04__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__div_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x / Bx [p]: apply the binary op with the scalar bound to the
// first argument.  Bb is B's bitmap (NULL for full matrices); entries absent
// from the bitmap are skipped.  GB_IDIV_UNSIGNED presumably defines the
// GraphBLAS convention for division by zero on unsigned ints — see GB.h.
GrB_Info GB (_bind1st__div_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = Ax [k] / y: apply the binary op with the scalar bound to the
// second argument.  Ab is A's bitmap (NULL for full matrices); entries
// absent from the bitmap are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__div_uint16)
(
    GB_void *Cx_output,
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((uint16_t *) y_input)) ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, k)) continue ;
        uint16_t a_k = GBX (Ax, k, false) ;
        Cx [k] = GB_IDIV_UNSIGNED (a_k, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ; \
}
// C = op (x, A'): transpose A and apply the op with the scalar bound first.
// The per-entry work is done by GB_CAST_OP (defined just above) inside the
// transpose template.
GrB_Info GB (_bind1st_tran__div_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code later in this file; preprocessor directives
// are processed even though this point is unreachable at run time
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ; \
}
// C = op (A', y): transpose A and apply the op with the scalar bound second.
// The per-entry work is done by GB_CAST_OP (redefined just above as aij/y)
// inside the transpose template.
GrB_Info GB (_bind2nd_tran__div_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
eltwise_add_arm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Element-wise addition of two blobs: top_blob = bottom_blob + bottom_blob1.
// Processes each channel independently; on ARM the bulk of the channel is
// handled 4 floats at a time with NEON inline assembly, and the tail
// (size % 4 elements) falls through to the scalar loop.
// NOTE(review): top_blob is assumed to be pre-allocated with the same
// dimensions as the inputs — no allocation or shape check is done here.
static void eltwise_add_arm(const Mat& bottom_blob, const Mat& bottom_blob1, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int size = w * h;
//#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<channels; q++)
{
const float* ptr = bottom_blob.channel(q);
const float* ptr1 = bottom_blob1.channel(q);
float* outptr = top_blob.channel(q);
#if __ARM_NEON
// nn = number of 4-float NEON iterations, remain = scalar leftovers
int nn = size >> 2;
int remain = size - (nn << 2);
#else
int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// AArch64 path: prefetch, load 4 floats from each input, fadd, store;
// the post-indexed loads/stores advance ptr/ptr1/outptr by 16 bytes
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%1], #16 \n"
"ld1 {v1.4s}, [%2], #16 \n"
"fadd v0.4s, v0.4s, v1.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%3], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr), // %1
"=r"(ptr1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(ptr),
"2"(ptr1),
"3"(outptr)
: "cc", "memory", "v0", "v1"
);
}
#else
// ARMv7 path: same scheme with 128-bit q registers
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #128] \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%1]! \n"
"vld1.f32 {d2-d3}, [%2]! \n"
"vadd.f32 q0, q0, q1 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%3]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(ptr), // %1
"=r"(ptr1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(ptr),
"2"(ptr1),
"3"(outptr)
: "cc", "memory", "q0", "q1"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail (or the whole channel when NEON is unavailable);
// the asm blocks above have already advanced the three pointers
for (; remain>0; remain--)
{
*outptr = *ptr + *ptr1;
ptr++;
ptr1++;
outptr++;
}
}
}
}
|
residualbased_elimination_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <unordered_set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
// Validate default parameters: only "name" is recognized here; unknown
// keys in ThisParameters make ValidateAndAssignDefaults throw
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedEliminationBuilderAndSolver"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
}
/**
* @brief Constructor.
*/
explicit ResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
// The linear solver is stored by the base class; nothing else to set up
// KRATOS_INFO("ResidualBasedEliminationBuilderAndSolver") << "Using the standard builder and solver " << std::endl;
}
/** Destructor.
*/
~ResidualBasedEliminationBuilderAndSolver() override
{
// All owned resources are managed by the base class / smart pointers
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
//getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
const double start_build = OpenMPUtils::GetCurrentTime();
// assemble all elements
// firstprivate gives each thread its own copies of the local contribution
// containers, so CalculateSystemContributions can resize them freely
#pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
// nowait: threads finishing the element loop may start on conditions
#pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mLockArray);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
// clean local elemental memory
pScheme->CleanMemory(*(it.base()));
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mLockArray);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
// clean local elemental memory
pScheme->CleanMemory(*(it.base()));
}
}
}
const double stop_build = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could
* be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY

    // Getting the elements from the model
    ElementsArrayType& rElements = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& rConditions = rModelPart.Conditions();

    // Resetting to zero the vector of reactions.
    // NOTE(review): the reactions vector is only allocated on demand, so
    // guard the dereference — the previous code dereferenced it
    // unconditionally, which can crash when reactions are not requested.
    if (BaseType::mpReactionsVector)
        TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    // Contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType EquationId;

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    // Assemble all elements
    for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it)
    {
        // Calculate elemental contribution
        pScheme->Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);

        // Assemble the elemental contribution
        AssembleLHS(A, LHS_Contribution, EquationId);

        // Clean local elemental memory
        pScheme->CleanMemory(*it);
    }

    LHS_Contribution.resize(0, 0, false);

    // Assemble all conditions
    for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it)
    {
        // Calculate condition contribution
        pScheme->Condition_Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);

        // Assemble the condition contribution
        AssembleLHS(A, LHS_Contribution, EquationId);
    }

    KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY

    // Getting the elements from the model
    ElementsArrayType& rElements = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& rConditions = rModelPart.Conditions();

    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

    // Resetting to zero the vector of reactions.
    // NOTE(review): the reactions vector is only allocated on demand, so
    // guard the dereference — the previous code dereferenced it
    // unconditionally, which can crash when reactions are not requested.
    if (BaseType::mpReactionsVector)
        TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    // Contributions to the system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType EquationId;

    // Assemble all elements: full rows for the free dofs, keeping all columns
    for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it)
    {
        // Calculate elemental contribution
        pScheme->Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);

        // Assemble the elemental contribution
        AssembleLHS_CompleteOnFreeRows(A, LHS_Contribution, EquationId);

        // Clean local elemental memory
        pScheme->CleanMemory(*it);
    }

    LHS_Contribution.resize(0, 0, false);

    // Assemble all conditions
    for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it)
    {
        // Calculate condition contribution
        pScheme->Condition_Calculate_LHS_Contribution(*it, LHS_Contribution, EquationId, CurrentProcessInfo);

        // Assemble the condition contribution
        AssembleLHS_CompleteOnFreeRows(A, LHS_Contribution, EquationId);
    }

    KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
) override
{
KRATOS_TRY
// An empty RHS is treated as a zero RHS
const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;
if (norm_b != 0.00) {
// Non-trivial RHS: delegate to the configured linear solver
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
} else {
// Zero RHS implies a zero solution increment
TSparseSpace::SetToZero(Dx);
}
// Echo solver information when the echo level asks for it
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
ModelPart& rModelPart
)
{
KRATOS_TRY
// An empty RHS is treated as a zero RHS
const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;
if (norm_b != 0.00)
{
// Some solvers (e.g. AMG with constraints) need extra physical data before solving
if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() )
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
{
// Zero RHS implies a zero solution increment; warn once from rank 0
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Echo solver information when the echo level asks for it (rank 0 only)
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safest function to use when it is possible to solve
 * just after building
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
// Assemble LHS and RHS, timed separately from the solve
Timer::Start("Build");
Build(pScheme, rModelPart, A, b);
Timer::Stop("Build");
// Does nothing...dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
// Echo level 3: dump the full system before solving
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
// Solve, reporting the wall-clock solve time at echo level >= 1 (rank 0 only)
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
SystemSolveWithPhysics(A, Dx, b, rModelPart);
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
// Echo level 3: dump the full system after solving
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Corresponds to the previous, but the system's matrix is considered already built and only the RHS is built again
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix (assumed already assembled)
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
// Only the residual is reassembled; A is reused as-is
BuildRHS(pScheme, rModelPart, b);
SystemSolve(A, Dx, b);
KRATOS_CATCH("")
}
/**
 * @brief Function to perform the build of the RHS.
 * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param b The RHS vector to be assembled
 */
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& b) override
{
KRATOS_TRY
//resetting to zero the vector of reactions
if(BaseType::mCalculateReactionsFlag)
{
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
}
//Getting the Elements
ElementsArrayType& pElements = rModelPart.Elements();
//getting the array of the conditions
ConditionsArrayType& pConditions = rModelPart.Conditions();
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//contributions to the system
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
// assemble all elements
// Each thread gets its own private copy of the local RHS vector and
// equation-id vector (firstprivate); the shared vector b is protected
// inside AssembleRHS.
#pragma omp parallel firstprivate( RHS_Contribution, EquationId)
{
const int nelements = static_cast<int>(pElements.size());
// 'nowait': threads proceed to the condition loop without a barrier
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i<nelements; i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
//detect if the element is active or not. If the user did not make any choice the element
//is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
// Calculate elemental Right Hand Side Contribution
pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
// Assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
// assemble all conditions
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i<nconditions; i++)
{
auto it = pConditions.begin() + i;
//detect if the condition is active or not. If the user did not make any choice the condition
//is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
//calculate elemental contribution
pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b, RHS_Contribution, EquationId);
}
}
}
KRATOS_CATCH("")
}
/**
 * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
 * and condition its Dofs.
 * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
 * way the matrix and RHS are built
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 */
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType& pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
// One unordered set of dof pointers per thread; merged later via a tree reduction
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
}
// Collect the dofs of every element into the set of the executing thread
#pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); i++)
{
typename ElementsArrayType::iterator it = pElements.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every element
pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
// Same for the conditions
ConditionsArrayType& pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
for (int i = 0; i < nconditions; i++)
{
typename ConditionsArrayType::iterator it = pConditions.begin() + i;
const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// gets list of Dof involved on every condition
pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
//here we do a reduction in a tree so to have everything on thread 0
// At each step, set i absorbs set i+new_max; the number of live sets halves
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5*static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5*static_cast<double>(old_max));
}
// Move the merged, de-duplicated dof pointers into the sorted dof array
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(*it);
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an exception if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef USE_LOCKS_IN_ASSEMBLY
// Re-create one OpenMP lock per dof (destroying any previous set first)
if (mLockArray.size() != 0)
{
for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
omp_destroy_lock(&mLockArray[i]);
}
mLockArray.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
omp_init_lock(&mLockArray[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
 * @brief Organises the dofset in order to speed up the building phase
 * @details Free dofs are numbered increasingly from the beginning of the system,
 * fixed dofs decreasingly from the end (in opposite order). Consequently an
 * EquationId greater or equal to "mEquationSystemSize" identifies a restrained dof.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
ModelPart& rModelPart
) override
{
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator it_dof = BaseType::mDofSet.begin(); it_dof != BaseType::mDofSet.end(); ++it_dof)
{
if (it_dof->IsFixed())
{
// fixed dofs grow downwards from the total number of dofs
it_dof->SetEquationId(--fix_id);
}
else
{
// free dofs grow upwards from zero
it_dof->SetEquationId(free_id++);
}
}
// After the loop, fix_id equals the number of free dofs
BaseType::mEquationSystemSize = fix_id;
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Allocates (or re-allocates) the system matrix, the solution and RHS
 * vectors and, if needed, the reactions vector, resizing them to the current
 * equation system size.
 */
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
) override
{
KRATOS_TRY
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
//KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
// NOTE(review): KRATOS_ERROR throws, so the two lines below appear to be
// unreachable dead code kept from a previous recovery path — confirm before removing.
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
// Reactions are stored for the fixed dofs only, hence size = total - free
if (BaseType::mCalculateReactionsFlag == true)
{
const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
if (BaseType::mpReactionsVector->size() != reactions_vector_size)
BaseType::mpReactionsVector->resize(reactions_vector_size, false);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * @brief Computes the reactions of the fixed dofs from the freshly rebuilt residual.
 */
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
//refresh RHS to have the correct reactions
BuildRHS(pScheme, rModelPart, b);
TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
// Fixed dofs carry equation ids >= mEquationSystemSize; their entry in the
// reactions vector is found by shifting the id back by the system size.
for (auto it_dof = BaseType::mDofSet.ptr_begin(); it_dof != BaseType::mDofSet.ptr_end(); ++it_dof) {
const std::size_t eq_id = (*it_dof)->EquationId();
if (eq_id >= BaseType::mEquationSystemSize) {
(*it_dof)->GetSolutionStepReactionValue() = -r_reactions_vector[eq_id - BaseType::mEquationSystemSize];
}
}
}
/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * unexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
// Intentionally empty: in this elimination builder the fixed dofs are
// excluded from the system during assembly, so no post-processing is needed.
}
/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 */
void Clear() override
{
// Drop the dof set, the reactions vector and the solver internals
this->mDofSet = DofsArrayType();
this->mpReactionsVector.reset();
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
// No builder-specific checks implemented; always reports success
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedEliminationBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
// Note: currently identical to PrintInfo — only the class name is printed.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
#ifdef USE_LOCKS_IN_ASSEMBLY
std::vector<omp_lock_t> mLockArray;
#endif
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief This function does the assembling of the LHS and RHS
 * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
 */
void Assemble(
TSystemMatrixType& A,
TSystemVectorType& b,
const LocalSystemMatrixType& LHS_Contribution,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
,std::vector< omp_lock_t >& lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
// Only free dofs (id < mEquationSystemSize) are assembled here
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef USE_LOCKS_IN_ASSEMBLY
// Lock-based variant: one lock per row guards both the RHS entry
// and the whole matrix row until released below
omp_set_lock(&lock_array[i_global]);
b[i_global] += RHS_Contribution(i_local);
#else
// Lock-free variant: update the RHS entry atomically
double& r_a = b[i_global];
const double& v_a = RHS_Contribution(i_local);
#pragma omp atomic
r_a += v_a;
#endif
AssembleRowContributionFreeDofs(A, LHS_Contribution, i_global, i_local, EquationId);
#ifdef USE_LOCKS_IN_ASSEMBLY
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that computation of reactions is not performed here!
}
}
//**************************************************************************
/**
 * @brief Builds the sparsity pattern of A (free dofs only) and allocates it as
 * a zero-filled CSR matrix.
 * @details First the column indices of every row are gathered in per-thread
 * unordered sets, then merged, and finally the uBLAS compressed_matrix arrays
 * are filled directly.
 */
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ModelPart& rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
const std::size_t equation_size = BaseType::mEquationSystemSize;
// indices[row] = set of column ids of the non-zero entries of that row
std::vector<std::unordered_set<std::size_t> > indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
indices[iii].reserve(40);
}
// Scratch vector for equation ids; resized by the scheme on each call
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel firstprivate(ids)
{
// The process info
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// We repeat the same declaration for each thread
std::vector<std::unordered_set<std::size_t> > temp_indexes(equation_size);
#pragma omp for
for (int index = 0; index < static_cast<int>(equation_size); ++index)
temp_indexes[index].reserve(30);
// Getting the size of the array of elements from the model
const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
// Element initial iterator
const auto el_begin = rModelPart.ElementsBegin();
// We iterate over the elements, recording every free (row, column) pair
#pragma omp for schedule(guided, 512) nowait
for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
auto it_elem = el_begin + i_elem;
pScheme->EquationId( *(it_elem.base()), ids, r_current_process_info);
for (auto& id_i : ids) {
if (id_i < BaseType::mEquationSystemSize) {
auto& row_indices = temp_indexes[id_i];
for (auto& id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Getting the size of the array of the conditions
const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
// Condition initial iterator
const auto cond_begin = rModelPart.ConditionsBegin();
// We iterate over the conditions, same recording as for the elements
#pragma omp for schedule(guided, 512) nowait
for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
auto it_cond = cond_begin + i_cond;
pScheme->Condition_EquationId( *(it_cond.base()), ids, r_current_process_info);
for (auto& id_i : ids) {
if (id_i < BaseType::mEquationSystemSize) {
auto& row_indices = temp_indexes[id_i];
for (auto& id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Merging all the temporal indexes (serialized: one thread at a time)
#pragma omp critical
{
for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
// Raw access to the CSR arrays of the uBLAS compressed matrix
double* Avalues = A.value_data().begin();
std::size_t* Arow_indices = A.index1_data().begin();
std::size_t* Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
// (each entry is a prefix sum of the previous one)
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
// Fill each row's column indices and zero values, then sort the columns
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
// Mark the matrix as filled so uBLAS accepts the hand-written structure
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
// virtual void ConstructMatrixStructure(
// TSystemMatrixType& A,
// ElementsContainerType& rElements,
// ConditionsArrayType& rConditions,
// ProcessInfo& CurrentProcessInfo)
// {
//
// std::size_t equation_size = A.size1();
// std::vector<std::vector<std::size_t> > indices(equation_size);
// // std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix));
//
// Element::EquationIdVectorType ids(3, 0);
// for (typename ElementsContainerType::iterator i_element = rElements.begin(); i_element != rElements.end(); i_element++)
// {
// (i_element)->EquationIdVector(ids, CurrentProcessInfo);
//
// for (std::size_t i = 0; i < ids.size(); i++)
// if (ids[i] < equation_size)
// {
// std::vector<std::size_t>& row_indices = indices[ids[i]];
// for (std::size_t j = 0; j < ids.size(); j++)
// if (ids[j] < equation_size)
// {
// AddUnique(row_indices, ids[j]);
// //indices[ids[i]].push_back(ids[j]);
// }
// }
//
// }
//
// for (typename ConditionsArrayType::iterator i_condition = rConditions.begin(); i_condition != rConditions.end(); i_condition++)
// {
// (i_condition)->EquationIdVector(ids, CurrentProcessInfo);
// for (std::size_t i = 0; i < ids.size(); i++)
// if (ids[i] < equation_size)
// {
// std::vector<std::size_t>& row_indices = indices[ids[i]];
// for (std::size_t j = 0; j < ids.size(); j++)
// if (ids[j] < equation_size)
// {
// AddUnique(row_indices, ids[j]);
// // indices[ids[i]].push_back(ids[j]);
// }
// }
// }
//
// //allocating the memory needed
// int data_size = 0;
// for (std::size_t i = 0; i < indices.size(); i++)
// {
// data_size += indices[i].size();
// }
// A.reserve(data_size, false);
//
// //filling with zero the matrix (creating the structure)
// Timer::Start("MatrixStructure");
//#ifndef _OPENMP
// for (std::size_t i = 0; i < indices.size(); i++)
// {
// std::vector<std::size_t>& row_indices = indices[i];
// std::sort(row_indices.begin(), row_indices.end());
//
// for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++)
// {
// A.push_back(i, *it, 0.00);
// }
// row_indices.clear();
// }
//#else
// int number_of_threads = omp_get_max_threads();
// vector<unsigned int> matrix_partition;
// CreatePartition(number_of_threads, indices.size(), matrix_partition);
// if (this->GetEchoLevel() > 2)
// {
// KRATOS_WATCH(matrix_partition);
// }
// for (int k = 0; k < number_of_threads; k++)
// {
// #pragma omp parallel
// if (omp_get_thread_num() == k)
// {
// for (std::size_t i = matrix_partition[k]; i < matrix_partition[k + 1]; i++)
// {
// std::vector<std::size_t>& row_indices = indices[i];
// std::sort(row_indices.begin(), row_indices.end());
//
// for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++)
// {
// A.push_back(i, *it, 0.00);
// }
// row_indices.clear();
// }
// }
// }
//#endif
// Timer::Stop("MatrixStructure");
// }
//**************************************************************************
/**
 * @brief Adds a local matrix into A, skipping every row AND column whose
 * equation id belongs to a fixed dof (id >= mEquationSystemSize).
 */
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
const unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global >= BaseType::mEquationSystemSize)
continue; // fixed row: nothing to assemble
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
const unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
/**
 * @brief This function is equivalent to the AssembleRowContribution of the block builder and solver
 * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped
 * @param A Global CSR matrix; @param Alocal local matrix; @param i global row id (must be a free dof);
 * @param i_local local row index; @param EquationId global column ids of the local columns
 */
inline void AssembleRowContributionFreeDofs(TSystemMatrixType& A, const Matrix& Alocal, const std::size_t i, const std::size_t i_local, const Element::EquationIdVectorType& EquationId)
{
// Raw CSR arrays of the global matrix
double* values_vector = A.value_data().begin();
std::size_t* index1_vector = A.index1_data().begin();
std::size_t* index2_vector = A.index2_data().begin();
const std::size_t left_limit = index1_vector[i];
// Find the first entry
// We iterate over the equation ids until we find the first equation id to be considered
// We count in which component we find an ID
std::size_t last_pos = 0;
std::size_t last_found = 0;
std::size_t counter = 0;
for(std::size_t j=0; j < EquationId.size(); ++j) {
++counter;
const std::size_t j_global = EquationId[j];
if (j_global < BaseType::mEquationSystemSize) {
last_pos = ForwardFind(j_global,left_limit,index2_vector);
last_found = j_global;
break;
}
}
// counter - 1 is the local column of the first free dof found above.
// NOTE(review): since the row i itself is free, EquationId always contains at
// least one free id, so the break above always fires and this guard is always
// satisfied — confirm the precondition before relying on the guard.
if (counter <= EquationId.size()) {
#ifndef USE_LOCKS_IN_ASSEMBLY
// Lock-free path: atomic accumulation into the CSR value array
double& r_a = values_vector[last_pos];
const double& v_a = Alocal(i_local,counter - 1);
#pragma omp atomic
r_a += v_a;
#else
// Lock-based path: caller already holds the row lock
values_vector[last_pos] += Alocal(i_local,counter - 1);
#endif
// Now find all of the other entries, scanning from the last hit since the
// columns of a CSR row are sorted
std::size_t pos = 0;
for(std::size_t j = counter; j < EquationId.size(); ++j) {
std::size_t id_to_find = EquationId[j];
if (id_to_find < BaseType::mEquationSystemSize) {
if(id_to_find > last_found)
pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
else if(id_to_find < last_found)
pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
else
pos = last_pos;
#ifndef USE_LOCKS_IN_ASSEMBLY
double& r = values_vector[pos];
const double& v = Alocal(i_local,j);
#pragma omp atomic
r += v;
#else
values_vector[pos] += Alocal(i_local,j);
#endif
last_found = id_to_find;
last_pos = pos;
}
}
}
}
/**
 * @brief Scans to the right from @p start until @p id_to_find is located.
 * @note Precondition: the id is present at or after @p start, otherwise the
 * scan runs out of bounds.
 */
inline std::size_t ForwardFind(const std::size_t id_to_find,
const std::size_t start,
const std::size_t* index_vector)
{
for (std::size_t pos = start; ; ++pos)
if (index_vector[pos] == id_to_find)
return pos;
}
/**
 * @brief Scans to the left from @p start until @p id_to_find is located.
 * @note Precondition: the id is present at or before @p start, otherwise the
 * scan underflows.
 */
inline std::size_t BackwardFind(const std::size_t id_to_find,
const std::size_t start,
const std::size_t* index_vector)
{
for (std::size_t pos = start; ; --pos)
if (index_vector[pos] == id_to_find)
return pos;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Appends @p candidate to @p v only if it is not already present.
 * @param v The vector of column indices to extend
 * @param candidate The index to insert
 * @note Linear membership test, expressed with std::find instead of the
 * previous hand-rolled iterator loop; intended for the short per-row
 * index lists used while constructing the matrix graph.
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
if (std::find(v.begin(), v.end(), candidate) == v.end())
{
v.push_back(candidate);
}
}
/**
 * @brief Adds a local RHS vector into the global RHS b.
 * @details Free dofs are accumulated into b; when reactions are requested,
 * fixed dofs are accumulated into the reactions vector instead (shifted by
 * mEquationSystemSize), otherwise they are simply discarded.
 */
void AssembleRHS(
TSystemVectorType& b,
const LocalSystemVectorType& RHS_Contribution,
const Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
// Reactions not needed: fixed-dof contributions are dropped
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR (atomically, called from parallel regions)
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR (atomically, called from parallel regions)
double& b_value = b[i_global];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
// Fixed dofs live past mEquationSystemSize; accumulate their residual
double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double& rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
/**
 * @brief Adds a local matrix into A keeping the COMPLETE rows of the free dofs.
 * @details Rows belonging to fixed dofs are skipped, but the columns are NOT
 * filtered: every column of a free row is assembled, including the ones that
 * belong to fixed dofs (this is what makes the rows "complete").
 */
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
const unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
// Use an unsigned index, consistently with AssembleLHS, avoiding
// the narrowing signed/unsigned conversion of the equation id
const unsigned int j_global = EquationId[j_local];
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
io.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include <stddef.h>
#include "base.h"
#include "io.h"
#include "sptensor.h"
#include "matrix.h"
#include "graph.h"
#include "timer.h"
/******************************************************************************
* FILE TYPES
*****************************************************************************/
/* Maps one filename extension to its SPLATT file type. */
struct ftype
{
  char * extension;
  splatt_file_type type;
};

/* Extension lookup table consulted by get_file_type(); the final entry
 * with a NULL extension is the end-of-table sentinel. */
static struct ftype file_extensions[] = {
  { ".tns", SPLATT_FILE_TEXT_COORD },
  { ".coo", SPLATT_FILE_TEXT_COORD },
  { ".bin", SPLATT_FILE_BIN_COORD },
  { NULL, 0}
};
/* Infer the file type of 'fname' from its extension; unrecognized or
 * missing extensions fall back to ASCII coordinate format (with a
 * warning on stderr). */
splatt_file_type get_file_type(
    char const * const fname)
{
  /* isolate the extension: everything from the last '.' onward */
  char const * const suffix = strrchr(fname, '.');
  if(suffix != NULL) {
    for(size_t idx = 0; file_extensions[idx].extension != NULL; ++idx) {
      if(strcmp(suffix, file_extensions[idx].extension) == 0) {
        return file_extensions[idx].type;
      }
    }
  }

  /* default to text coordinate format */
  fprintf(stderr, "SPLATT: extension for '%s' not recognized. "
                  "Defaulting to ASCII coordinate form.\n", fname);
  return SPLATT_FILE_TEXT_COORD;
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/* Parse a whitespace-delimited text COORD tensor from an open stream.
 * Pass 1 (tt_get_dims) finds nnz, nmodes, per-mode dims and index
 * offsets; pass 2 re-reads the stream and fills indices (shifted to
 * 0-based via offsets[]) and values.  Returns NULL when nmodes exceeds
 * MAX_NMODES.
 * NOTE(review): the tt_alloc() result is not NULL-checked here --
 * presumably tt_alloc aborts on failure; confirm. */
static sptensor_t * p_tt_read_file(
  FILE * fin)
{
  char * ptr = NULL;

  /* first count nnz in tensor */
  idx_t nnz = 0;
  idx_t nmodes = 0;

  idx_t dims[MAX_NMODES];
  idx_t offsets[MAX_NMODES];

  tt_get_dims(fin, &nmodes, &nnz, dims, offsets);

  if(nmodes > MAX_NMODES) {
    fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. "
                    "Found %"SPLATT_PF_IDX". Please recompile with "
                    "MAX_NMODES=%"SPLATT_PF_IDX".\n",
            (idx_t) MAX_NMODES, nmodes, nmodes);
    return NULL;
  }

  /* allocate structures */
  sptensor_t * tt = tt_alloc(nnz, nmodes);
  memcpy(tt->dims, dims, nmodes * sizeof(*dims));

  char * line = NULL;
  int64_t read;
  size_t len = 0;

  /* fill in tensor data; stream was rewound by tt_get_dims, rewind again
   * to be safe */
  rewind(fin);
  nnz = 0;
  while((read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      ptr = line;
      for(idx_t m=0; m < nmodes; ++m) {
        /* subtract the per-mode offset (1 for 1-indexed files) */
        tt->ind[m][nnz] = strtoull(ptr, &ptr, 10) - offsets[m];
      }
      tt->vals[nnz++] = strtod(ptr, &ptr);
    }
  }

  free(line);
  return tt;
}
/**
* @brief Write a binary header to an input file.
*
* @param fout The file to write to.
* @param tt The tensor to form a header from.
* @param[out] header The header to write.
*/
/* Write the binary COORD header (magic, index width, value width) to
 * 'fout' and record the chosen widths in *header for the caller.
 * Widths are chosen as small as losslessly possible. */
static void p_write_tt_binary_header(
  FILE * fout,
  sptensor_t const * const tt,
  bin_header * header)
{
  /* magic number identifying the binary COORD format */
  int32_t type = SPLATT_BIN_COORD;
  fwrite(&type, sizeof(type), 1, fout);

  /* use 32-bit indices only if nnz and every dimension fit */
  uint64_t idx = tt->nnz < UINT32_MAX ? sizeof(uint32_t) : sizeof(uint64_t);
  for(idx_t m=0; m < tt->nmodes; ++m) {
    if(tt->dims[m] > UINT32_MAX) {
      idx = sizeof(uint64_t);
      break;
    }
  }

  /* use float values only if every value round-trips exactly */
  uint64_t val = sizeof(float);
  for(idx_t n=0; n < tt->nnz; ++n) {
    float conv = tt->vals[n];
    if((splatt_val_t) conv != tt->vals[n]) {
      val = sizeof(splatt_val_t);
      /* one inexact value forces full width -- no need to scan further */
      break;
    }
  }

  header->magic = type;
  header->idx_width = idx;
  header->val_width = val;

  fwrite(&idx, sizeof(idx), 1, fout);
  fwrite(&val, sizeof(val), 1, fout);
}
/**
* @brief Read a COORD tensor from a binary file, converting from smaller idx or
* val precision if necessary.
*
* @param fin The file to read from.
*
* @return The parsed tensor.
*/
/* Read a COORD tensor from a binary stream whose header has the layout
 * produced by p_write_tt_binary_header(); narrower on-disk widths are
 * up-converted by fill_binary_idx()/fill_binary_val().
 * Returns NULL when the stored mode count exceeds MAX_NMODES. */
static sptensor_t * p_tt_read_binary_file(
  FILE * fin)
{
  bin_header header;
  read_binary_header(fin, &header);

  idx_t nnz = 0;
  idx_t nmodes = 0;
  idx_t dims[MAX_NMODES];

  fill_binary_idx(&nmodes, 1, &header, fin);

  /* Validate BEFORE reading the dimensions: a corrupt or oversized mode
   * count would otherwise overflow the fixed-size stack buffer dims[]. */
  if(nmodes > MAX_NMODES) {
    fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. "
                    "Found %"SPLATT_PF_IDX". Please recompile with "
                    "MAX_NMODES=%"SPLATT_PF_IDX".\n",
            (idx_t) MAX_NMODES, nmodes, nmodes);
    return NULL;
  }

  fill_binary_idx(dims, nmodes, &header, fin);
  fill_binary_idx(&nnz, 1, &header, fin);

  /* allocate structures */
  sptensor_t * tt = tt_alloc(nnz, nmodes);
  memcpy(tt->dims, dims, nmodes * sizeof(*dims));

  /* fill in tensor data */
  for(idx_t m=0; m < nmodes; ++m) {
    fill_binary_idx(tt->ind[m], nnz, &header, fin);
  }
  fill_binary_val(tt->vals, nnz, &header, fin);

  return tt;
}
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/* Public API: load a tensor from 'fname' and expose its raw arrays.
 * On success, ownership of dims, ind, and vals transfers to the caller;
 * the sptensor_t shell is shallow-freed here *on purpose* (plain free(),
 * not tt_free()) so the exported arrays stay alive. */
int splatt_load(
  char const * const fname,
  splatt_idx_t * nmodes,
  splatt_idx_t ** dims,
  splatt_idx_t * nnz,
  splatt_idx_t *** inds,
  splatt_val_t ** vals)
{
  sptensor_t * tt = tt_read_file(fname);
  if(tt == NULL) {
    return SPLATT_ERROR_BADINPUT;
  }

  /* hand the internal buffers to the caller */
  *nmodes = tt->nmodes;
  *dims = tt->dims;
  *nnz = tt->nnz;
  *vals = tt->vals;
  *inds = tt->ind;

  /* shallow free -- array ownership was just transferred */
  free(tt);
  return SPLATT_SUCCESS;
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
sptensor_t * tt_read_file(
char const * const fname)
{
FILE * fin;
if((fin = fopen(fname, "r")) == NULL) {
fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
return NULL;
}
sptensor_t * tt = NULL;
timer_start(&timers[TIMER_IO]);
switch(get_file_type(fname)) {
case SPLATT_FILE_TEXT_COORD:
tt = p_tt_read_file(fin);
break;
case SPLATT_FILE_BIN_COORD:
tt = p_tt_read_binary_file(fin);
break;
}
timer_stop(&timers[TIMER_IO]);
fclose(fin);
return tt;
}
/* Read a binary COORD tensor directly from 'fname' (no extension
 * detection). Returns NULL if the file cannot be opened. */
sptensor_t * tt_read_binary_file(
  char const * const fname)
{
  FILE * fin;
  /* "rb": this stream carries raw binary data; text mode would corrupt
   * it on platforms that translate line endings (no-op on POSIX) */
  if((fin = fopen(fname, "rb")) == NULL) {
    fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", fname);
    return NULL;
  }

  timer_start(&timers[TIMER_IO]);
  sptensor_t * tt = p_tt_read_binary_file(fin);
  timer_stop(&timers[TIMER_IO]);

  fclose(fin);
  return tt;
}
/* Scan a text COORD stream to determine nmodes, nnz, per-mode maximum
 * index (outdims) and per-mode minimum index (offset). The stream is
 * rewound before returning. Exits with an error unless every mode is
 * 0- or 1-indexed; dims of 0-indexed modes are bumped by one so they
 * count elements rather than the largest index.
 * NOTE(review): a stream with no data lines leaves nmodes at 0 and the
 * decrement below wraps it to a huge idx_t -- confirm callers never pass
 * an empty file. */
void tt_get_dims(
  FILE * fin,
  idx_t * const outnmodes,
  idx_t * const outnnz,
  idx_t * outdims,
  idx_t * offset)
{
  char * ptr = NULL;
  idx_t nnz = 0;
  char * line = NULL;
  ssize_t read;
  size_t len = 0;

  /* first count modes in tensor */
  idx_t nmodes = 0;
  while((read = getline(&line, &len, fin)) != -1) {
    if(read > 1 && line[0] != '#') {
      /* get nmodes from first nnz line */
      ptr = strtok(line, " \t");
      while(ptr != NULL) {
        ++nmodes;
        ptr = strtok(NULL, " \t");
      }
      break;
    }
  }
  /* the last token on the line is the value, not an index */
  --nmodes;

  for(idx_t m=0; m < nmodes; ++m) {
    outdims[m] = 0;
    /* start the minimum at 1: only 0 can lower it, which is all we need
     * to distinguish 0- from 1-indexed files */
    offset[m] = 1;
  }

  /* fill in tensor dimensions */
  rewind(fin);
  while((read = getline(&line, &len, fin)) != -1) {
    /* skip empty and commented lines */
    if(read > 1 && line[0] != '#') {
      ptr = line;
      for(idx_t m=0; m < nmodes; ++m) {
        idx_t ind = strtoull(ptr, &ptr, 10);
        /* outdim is maximum */
        outdims[m] = (ind > outdims[m]) ? ind : outdims[m];
        /* offset is minimum */
        offset[m] = (ind < offset[m]) ? ind : offset[m];
      }
      /* skip over tensor val */
      strtod(ptr, &ptr);
      ++nnz;
    }
  }
  *outnnz = nnz;
  *outnmodes = nmodes;

  /* only support 0 or 1 indexing */
  for(idx_t m=0; m < nmodes; ++m) {
    if(offset[m] != 0 && offset[m] != 1) {
      fprintf(stderr, "SPLATT: ERROR tensors must be 0 or 1 indexed. "
                      "Mode %"SPLATT_PF_IDX" is %"SPLATT_PF_IDX" indexed.\n",
              m, offset[m]);
      exit(1);
    }
  }

  /* adjust dims when zero-indexing: max index N means N+1 elements */
  for(idx_t m=0; m < nmodes; ++m) {
    if(offset[m] == 0) {
      ++outdims[m];
    }
  }

  rewind(fin);
  free(line);
}
/* Write 'tt' in text COORD form to 'fname', or to stdout when fname is
 * NULL. Streams we did not open are not closed. */
void tt_write(
  sptensor_t const * const tt,
  char const * const fname)
{
  FILE * fout = stdout;
  if(fname != NULL) {
    fout = fopen(fname, "w");
    if(fout == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  tt_write_file(tt, fout);

  if(fname != NULL) {
    fclose(fout);
  }
}
/* Emit one line per nonzero: the nmodes indices (1-based on disk)
 * followed by the value. */
void tt_write_file(
  sptensor_t const * const tt,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);
  idx_t const nnz = tt->nnz;
  idx_t const nmodes = tt->nmodes;
  for(idx_t n=0; n < nnz; ++n) {
    for(idx_t m=0; m < nmodes; ++m) {
      /* files are 1-indexed instead of 0 */
      fprintf(fout, "%"SPLATT_PF_IDX" ", tt->ind[m][n] + 1);
    }
    fprintf(fout, "%"SPLATT_PF_VAL"\n", tt->vals[n]);
  }
  timer_stop(&timers[TIMER_IO]);
}
/* Write 'tt' in binary COORD form to 'fname', or to stdout when fname
 * is NULL. */
void tt_write_binary(
  sptensor_t const * const tt,
  char const * const fname)
{
  FILE * fout;
  if(fname == NULL) {
    fout = stdout;
  } else {
    /* "wb": the stream receives raw binary; text mode would translate
     * newline bytes on some platforms and corrupt the output */
    if((fout = fopen(fname,"wb")) == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  tt_write_binary_file(tt, fout);

  if(fname != NULL) {
    fclose(fout);
  }
}
/* Serialize 'tt' in binary COORD form to an open stream. The header
 * (written by p_write_tt_binary_header) decides the on-disk widths;
 * data is either written directly (native width) or down-converted
 * element-wise (32-bit indices / float values).
 * NOTE(review): fwrite return values are unchecked throughout -- a full
 * disk produces a silently truncated file. */
void tt_write_binary_file(
  sptensor_t const * const tt,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);

  bin_header header;
  p_write_tt_binary_header(fout, tt, &header);

  /* WRITE INDICES */
  /* if we are writing to the same precision they are stored in, just fwrite */
  if(header.idx_width == sizeof(splatt_idx_t)) {
    fwrite(&tt->nmodes, sizeof(tt->nmodes), 1, fout);
    fwrite(tt->dims, sizeof(*tt->dims), tt->nmodes, fout);
    fwrite(&tt->nnz, sizeof(tt->nnz), 1, fout);
    for(idx_t m=0; m < tt->nmodes; ++m) {
      fwrite(tt->ind[m], sizeof(*tt->ind[m]), tt->nnz, fout);
    }

  /* otherwise we convert (downwards) element-wise */
  } else if(header.idx_width < sizeof(splatt_idx_t)) {
    uint32_t buf = tt->nmodes;
    fwrite(&buf, sizeof(buf), 1, fout);
    for(idx_t m=0; m < tt->nmodes; ++m) {
      buf = tt->dims[m];
      fwrite(&buf, sizeof(buf), 1, fout);
    }
    buf = tt->nnz;
    fwrite(&buf, sizeof(buf), 1, fout);

    /* write inds, narrowing each index to 32 bits (header guarantees
     * they fit) */
    for(idx_t m=0; m < tt->nmodes; ++m) {
      for(idx_t n=0; n < tt->nnz; ++n) {
        buf = tt->ind[m][n];
        fwrite(&buf, sizeof(buf), 1, fout);
      }
    }
  } else {
    /* XXX this should never be reached */
    fprintf(stderr, "SPLATT: the impossible happened, "
                    "idx_width > IDX_TYPEWIDTH.\n");
    abort();
  }

  /* WRITE VALUES */
  if(header.val_width == sizeof(splatt_val_t)) {
    fwrite(tt->vals, sizeof(*tt->vals), tt->nnz, fout);

  /* otherwise we convert (downwards) element-wise */
  } else if(header.val_width < sizeof(splatt_val_t)) {
    for(idx_t n=0; n < tt->nnz; ++n) {
      float buf = tt->vals[n];
      fwrite(&buf, sizeof(buf), 1, fout);
    }
  } else {
    /* XXX this should never be reached */
    fprintf(stderr, "SPLATT: the impossible happened, "
                    "val_width > VAL_TYPEWIDTH.\n");
    abort();
  }

  timer_stop(&timers[TIMER_IO]);
}
/* Read the binary COORD header (magic, index width, value width) and
 * sanity-check the widths against this build's type sizes. Exits when
 * the header is truncated or the index width cannot be represented;
 * only warns about reduced value precision. */
void read_binary_header(
  FILE * fin,
  bin_header * header)
{
  /* || short-circuits, preserving read order; bail out on truncated
   * input instead of proceeding with indeterminate header fields */
  if(fread(&(header->magic), sizeof(header->magic), 1, fin) != 1 ||
     fread(&(header->idx_width), sizeof(header->idx_width), 1, fin) != 1 ||
     fread(&(header->val_width), sizeof(header->val_width), 1, fin) != 1) {
    fprintf(stderr, "SPLATT: ERROR failed to read binary header.\n");
    exit(EXIT_FAILURE);
  }

  if(header->idx_width > SPLATT_IDX_TYPEWIDTH / 8) {
    /* cast so the value always matches %zu regardless of the field's
     * declared integer type */
    fprintf(stderr, "SPLATT: ERROR input has %zu-bit integers. "
                    "Build with SPLATT_IDX_TYPEWIDTH %zu\n",
            (size_t) (header->idx_width * 8), (size_t) (header->idx_width * 8));
    exit(EXIT_FAILURE);
  }
  if(header->val_width > SPLATT_VAL_TYPEWIDTH / 8) {
    fprintf(stderr, "SPLATT: WARNING input has %zu-bit floating-point values. "
                    "Build with SPLATT_VAL_TYPEWIDTH %zu for full precision\n",
            (size_t) (header->val_width * 8), (size_t) (header->val_width * 8));
  }
}
/* Read 'count' indices from 'fin' into 'buffer', widening from the
 * on-disk width recorded in 'header' when it differs from the native
 * splatt_idx_t. read_binary_header() has already rejected widths larger
 * than native, so the only conversion case is 32 -> 64 bit.
 * NOTE(review): fread return values are unchecked; a truncated file
 * leaves part of the buffer uninitialized. */
void fill_binary_idx(
    idx_t * const buffer,
    idx_t const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->idx_width == sizeof(splatt_idx_t)) {
    /* native width: one bulk read */
    fread(buffer, sizeof(idx_t), count, fin);
  } else {
    /* read in uint32_t in a buffered fashion */
    idx_t const BUF_LEN = 1024*1024;
    uint32_t * ubuf = splatt_malloc(BUF_LEN * sizeof(*ubuf));

    for(idx_t n=0; n < count; n += BUF_LEN) {
      idx_t const read_count = SS_MIN(BUF_LEN, count - n);
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      /* widen the chunk in parallel */
      #pragma omp parallel for schedule(static)
      for(idx_t i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];
      }
    }
    splatt_free(ubuf);
  }
}
/* Read 'count' values from 'fin' into 'buffer', converting from the
 * on-disk width recorded in 'header' when it differs from the native
 * splatt_val_t. The staging buffer uses whichever float width SPLATT is
 * *not* built with, since that is the only possible mismatch.
 * NOTE(review): fread return values are unchecked; a truncated file
 * leaves part of the buffer uninitialized. */
void fill_binary_val(
    val_t * const buffer,
    idx_t const count,
    bin_header const * const header,
    FILE * fin)
{
  if(header->val_width == sizeof(splatt_val_t)) {
    /* native width: one bulk read */
    fread(buffer, sizeof(val_t), count, fin);
  } else {
    /* read in float in a buffered fashion */
    idx_t const BUF_LEN = 1024*1024;

    /* select whichever SPLATT *is not* configured with. */
#if SPLATT_VAL_TYPEWIDTH == 64
    float * ubuf = splatt_malloc(BUF_LEN * sizeof(*ubuf));
#else
    double * ubuf = splatt_malloc(BUF_LEN * sizeof(*ubuf));
#endif

    for(idx_t n=0; n < count; n += BUF_LEN) {
      idx_t const read_count = SS_MIN(BUF_LEN, count - n);
      fread(ubuf, sizeof(*ubuf), read_count, fin);
      /* convert the chunk in parallel */
      #pragma omp parallel for schedule(static)
      for(idx_t i=0; i < read_count; ++i) {
        buffer[n + i] = ubuf[i];
      }
    }
    splatt_free(ubuf);
  }
}
/* Write hypergraph 'hg' to 'fname'; NULL or "-" selects stdout. */
void hgraph_write(
  hgraph_t const * const hg,
  char const * const fname)
{
  FILE * fout;
  if(fname == NULL || strcmp(fname, "-") == 0) {
    fout = stdout;
  } else {
    if((fout = fopen(fname,"w")) == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  hgraph_write_file(hg, fout);

  /* only close streams we opened; the old unconditional fclose() closed
   * stdout when fname was NULL/"-", breaking any later output */
  if(fout != stdout) {
    fclose(fout);
  }
}
/* Emit 'hg' in hMETIS-style text form: a header line with the edge and
 * vertex counts plus a format code (11 = edge+vertex weights, 10 =
 * vertex weights only, 1 = edge weights only, absent = unweighted),
 * then one line per hyperedge (optional weight first, then 1-based
 * vertex ids), then optional per-vertex weight lines. */
void hgraph_write_file(
  hgraph_t const * const hg,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);
  /* print header */
  fprintf(fout, "%"SPLATT_PF_IDX" %"SPLATT_PF_IDX, hg->nhedges, hg->nvtxs);
  /* format code encodes which weights are present */
  if(hg->vwts != NULL) {
    if(hg->hewts != NULL) {
      fprintf(fout, " 11");
    } else {
      fprintf(fout, " 10");
    }
  } else if(hg->hewts != NULL) {
    fprintf(fout, " 1");
  }
  fprintf(fout, "\n");

  /* print hyperedges */
  for(idx_t e=0; e < hg->nhedges; ++e) {
    if(hg->hewts != NULL) {
      fprintf(fout, "%"SPLATT_PF_IDX" ", hg->hewts[e]);
    }
    /* vertex ids are 1-based on disk */
    for(idx_t v=hg->eptr[e]; v < hg->eptr[e+1]; ++v) {
      fprintf(fout, "%"SPLATT_PF_IDX" ", hg->eind[v]+1);
    }
    fprintf(fout, "\n");
  }

  /* print vertex weights */
  if(hg->vwts != NULL) {
    for(idx_t v=0; v < hg->nvtxs; ++v) {
      fprintf(fout, "%"SPLATT_PF_IDX"\n", hg->vwts[v]);
    }
  }
  timer_stop(&timers[TIMER_IO]);
}
/* Emit 'graph' in METIS-style text form. The header holds the vertex
 * count, the undirected edge count (nedges/2 -- each edge is stored
 * twice in the adjacency lists), and a three-digit flag "0VE" where V/E
 * are 1 when vertex/edge weights are present; a fourth field gives the
 * number of vertex-weight constraints when > 1. Each following line is
 * one vertex: its weights, then (neighbor, optional edge weight) pairs
 * with 1-based neighbor ids. */
void graph_write_file(
  splatt_graph const * const graph,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);

  /* print header */
  fprintf(fout, "%"SPLATT_PF_IDX" %"SPLATT_PF_IDX" 0%d%d", graph->nvtxs,
      graph->nedges/2, graph->vwgts != NULL, graph->ewgts != NULL);

  /* handle multi-constraint partitioning */
  if(graph->nvwgts > 1) {
    fprintf(fout, " %"SPLATT_PF_IDX, graph->nvwgts);
  }
  fprintf(fout, "\n");

  /* now write adj list */
  for(vtx_t v=0; v < graph->nvtxs; ++v) {
    /* vertex weights (nvwgts consecutive values per vertex) */
    if(graph->vwgts != NULL) {
      for(idx_t x=0; x < graph->nvwgts; ++x) {
        fprintf(fout, "%"SPLATT_PF_IDX" ", graph->vwgts[x+(v*graph->nvwgts)]);
      }
    }

    for(adj_t e=graph->eptr[v]; e < graph->eptr[v+1]; ++e) {
      /* neighbor ids are 1-based on disk */
      fprintf(fout, "%"SPLATT_PF_IDX" ", graph->eind[e] + 1);

      /* edge weight */
      if(graph->ewgts != NULL) {
        fprintf(fout, "%"SPLATT_PF_IDX" ", graph->ewgts[e]);
      }
    }
    fprintf(fout, "\n");
  }

  timer_stop(&timers[TIMER_IO]);
}
/* Write sparse matrix 'mat' to 'fname'; NULL or "-" selects stdout. */
void spmat_write(
  spmatrix_t const * const mat,
  char const * const fname)
{
  FILE * fout = stdout;
  if(fname != NULL && strcmp(fname, "-") != 0) {
    fout = fopen(fname, "w");
    if(fout == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  spmat_write_file(mat, fout);

  if(fout != stdout) {
    fclose(fout);
  }
}
/* Dump a CSR matrix: one row per line as "col val" pairs. */
void spmat_write_file(
  spmatrix_t const * const mat,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);
  for(idx_t i=0; i < mat->I; ++i) {
    idx_t const row_end = mat->rowptr[i+1];
    for(idx_t j=mat->rowptr[i]; j < row_end; ++j) {
      fprintf(fout, "%"SPLATT_PF_IDX" %"SPLATT_PF_VAL" ", mat->colind[j], mat->vals[j]);
    }
    fprintf(fout, "\n");
  }
  timer_stop(&timers[TIMER_IO]);
}
/* Write dense matrix 'mat' to 'fname'; NULL selects stdout. */
void mat_write(
  matrix_t const * const mat,
  char const * const fname)
{
  FILE * fout = stdout;
  if(fname != NULL) {
    fout = fopen(fname, "w");
    if(fout == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  mat_write_file(mat, fout);

  if(fout != stdout) {
    fclose(fout);
  }
}
/* Print a dense I x J matrix, one row per line, honoring the stored
 * layout: row-major entry (i,j) is at j + i*J, column-major at i + j*I. */
void mat_write_file(
  matrix_t const * const mat,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);
  idx_t const I = mat->I;
  idx_t const J = mat->J;
  val_t const * const vals = mat->vals;

  for(idx_t i=0; i < I; ++i) {
    for(idx_t j=0; j < J; ++j) {
      idx_t const off = mat->rowmajor ? (j + (i*J)) : (i + (j*I));
      fprintf(fout, "%+0.8le ", vals[off]);
    }
    fprintf(fout, "\n");
  }
  timer_stop(&timers[TIMER_IO]);
}
/* Read an I x J dense matrix into 'mat' from 'fname'; NULL selects
 * stdin. */
void mat_read(
  idx_t I,
  idx_t J,
  matrix_t *mat,
  char const * const fname)
{
  FILE * fin = stdin;
  if(fname != NULL) {
    fin = fopen(fname, "r");
    if(fin == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  mat_read_file(I, J, mat, fin);

  if(fin != stdin) {
    fclose(fin);
  }
}
/* Read I*J whitespace-separated values into mat->vals, honoring the
 * stored layout (row- vs column-major).
 * NOTE(review): "%lf" parses a double directly into mat->vals -- this is
 * only correct when val_t is double (SPLATT_VAL_TYPEWIDTH == 64);
 * confirm behavior for 32-bit value builds.
 * NOTE(review): fscanf results are unchecked; short input silently
 * leaves trailing entries uninitialized. */
void mat_read_file(
  idx_t I,
  idx_t J,
  matrix_t *mat,
  FILE * fin)
{
  timer_start(&timers[TIMER_IO]);
  if(mat->rowmajor) {
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        fscanf(fin, "%lf", &(mat->vals[j + (i*J)]));
      }
    }
  } else {
    for(idx_t i=0; i < mat->I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        fscanf(fin, "%lf", &(mat->vals[i + (j*I)]));
      }
    }
  }
  timer_stop(&timers[TIMER_IO]);
}
/* Write 'len' values of 'vec' to 'fname'; NULL selects stdout. */
void vec_write(
  val_t const * const vec,
  idx_t const len,
  char const * const fname)
{
  FILE * fout = stdout;
  if(fname != NULL) {
    fout = fopen(fname, "w");
    if(fout == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  vec_write_file(vec, len, fout);

  if(fout != stdout) {
    fclose(fout);
  }
}
/* Print one value per line in scientific notation. */
void vec_write_file(
  val_t const * const vec,
  idx_t const len,
  FILE * fout)
{
  timer_start(&timers[TIMER_IO]);
  idx_t i = 0;
  while(i < len) {
    fprintf(fout, "%le\n", vec[i]);
    ++i;
  }
  timer_stop(&timers[TIMER_IO]);
}
/* Read one partition id per vertex from 'ifname'. Returns a
 * caller-owned (splatt_free) array of nvtxs ids and sets *nparts to
 * 1 + the largest id seen (ids are 0-indexed). Returns NULL on open or
 * parse failure. */
idx_t * part_read(
  char const * const ifname,
  idx_t const nvtxs,
  idx_t * nparts)
{
  FILE * pfile;
  if((pfile = fopen(ifname, "r")) == NULL) {
    fprintf(stderr, "SPLATT ERROR: unable to open '%s'\n", ifname);
    return NULL;
  }

  *nparts = 0;
  idx_t * arr = (idx_t *) splatt_malloc(nvtxs * sizeof(idx_t));
  for(idx_t i=0; i < nvtxs; ++i) {
    /* fscanf returns the number of items converted: both 0 (bad token)
     * and EOF (-1) mean no id was read. The previous '== 0' test let EOF
     * through, so a short file filled arr[] with indeterminate values. */
    if(fscanf(pfile, "%"SPLATT_PF_IDX, &(arr[i])) != 1) {
      fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n", ifname);
      splatt_free(arr); /* pair splatt_malloc with splatt_free */
      fclose(pfile);    /* previously leaked on this path */
      return NULL;
    }
    if(arr[i] > *nparts) {
      *nparts = arr[i];
    }
  }
  fclose(pfile);

  /* increment to adjust for 0-indexing of partition ids */
  *nparts += 1;

  return arr;
}
/******************************************************************************
* PERMUTATION FUNCTIONS
*****************************************************************************/
/* Write a permutation of length 'dim' to 'fname'; NULL selects stdout. */
void perm_write(
  idx_t * perm,
  idx_t const dim,
  char const * const fname)
{
  FILE * fout = stdout;
  if(fname != NULL) {
    fout = fopen(fname, "w");
    if(fout == NULL) {
      fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n.", fname);
      return;
    }
  }

  perm_write_file(perm, dim, fout);

  if(fname != NULL) {
    fclose(fout);
  }
}
/* Print one permutation entry per line. */
void perm_write_file(
  idx_t * perm,
  idx_t const dim,
  FILE * fout)
{
  /* time this like every other I/O routine in this file */
  timer_start(&timers[TIMER_IO]);
  for(idx_t i=0; i < dim; ++i) {
    fprintf(fout, "%"SPLATT_PF_IDX"\n", perm[i]);
  }
  timer_stop(&timers[TIMER_IO]);
}
|
ContaminationEstimator.h | /*The MIT License (MIT)
Copyright (c) 2017 Fan Zhang, Hyun Min Kang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* Contact: Fan Zhang <fanzhang@umich.edu> */
#ifndef CONTAMINATIONESTIMATOR_H_
#define CONTAMINATIONESTIMATOR_H_
#include <string>
#include <unordered_map>
//#include <tkDecls.h>
#include "MathVector.h"
#include "MathGenMin.h"
#include "SimplePileupViewer.h"
#include <limits>
#ifdef _OPENMP
#include "omp.h"
#endif
// Estimates DNA sample contamination (mixing fraction alpha and the
// principal-component coordinates of the intended/contaminating samples)
// by maximizing a mixture likelihood over pileup bases at known markers.
class ContaminationEstimator {
public:
    // Optimization-mode flags.
    bool isPCFixed;
    bool isAlphaFixed;
    bool isAFknown;
    bool isHeter;
    bool isPileupInput;
    bool isSanityCheckDisabled;
    bool verbose;
    int numPC;
    int numThread;
    int seed;
    double epsilon;
#define PCtype double
//#define PHRED(x) pow(10.0,x/-10.0)

    // Convert a phred-scaled quality into an error probability.
    static double Phred(double x) {
        return pow(10.0, x / -10.0);
    }

    // Negative log-likelihood functor handed to the Amoeba (Nelder-Mead)
    // minimizer; also tracks the best parameter set seen so far.
    class FullLLKFunc : public VectorFunc {
    public:
        double min_af;
        double max_af;
        double llk1;
        double llk0;
        ContaminationEstimator *ptr;
        std::vector<double> fixPC;
        std::vector<double> fixPC2;
        double fixAlpha;
        std::vector<double> globalPC;//best result holder
        std::vector<double> globalPC2;//best result holder
        double globalAlpha;//best result holder
        const char *Base;

        FullLLKFunc() {
            FullLLKFunc::Base = "actg";
            min_af = 0.00005;
            max_af = 0.99995;
            llk1 = 0;
            llk0 = 0;        // was uninitialized
            ptr = nullptr;
            fixAlpha = 0;
            globalAlpha = 0; // was uninitialized
            std::cerr << "Initialize from FullLLKFunc()" << std::endl;
        }

        FullLLKFunc(int dim, ContaminationEstimator *contPtr) : fixPC(dim, 0.), fixPC2(dim, 0.), globalPC(fixPC),
                                                               globalPC2(fixPC2) {
            FullLLKFunc::Base = "actg";
            min_af = 0.00005;
            max_af = 0.99995;
            llk1 = 0.;
            llk0 = 0.;       // was uninitialized
            ptr = contPtr;
            fixAlpha = 0.;
            globalAlpha = 0.;
            std::cerr << "Initialize from FullLLKFunc(int dim, ContaminationEstimator* contPtr)" << std::endl;
        }

        ~FullLLKFunc() {};

        inline static double InvLogit(double &x) {
            double e = exp(x);
            return e / (1. + e);
        };

        inline static double Logit(double &x) {
            return log(x / (1. - x));
        };

        // Standardize tPC using the estimator's per-component mean/sd.
        inline int Normalize(std::vector<double> &tPC) {
            for (int i = 0; i < tPC.size(); ++i) {
                tPC[i] = (tPC[i] - ptr->muv[i]) / ptr->sdv[i];
            }
            return 0;
        };

        inline int InvNormalize(std::vector<double> &tPC) {
            for (int i = 0; i < tPC.size(); ++i) {
                tPC[i] = tPC[i] * ptr->sdv[i] + ptr->muv[i];
            }
            return 0;
        };

        // Majority-vote the alternate allele from pileup bases ('.'/','
        // are reference matches and are skipped). Returns '\0' when no
        // non-reference base was seen.
        inline char findAlt(std::vector<char> &tmpBase) {
            // BUGFIX: a[] was uninitialized -- incrementing indeterminate
            // counters is undefined behavior and made the vote random.
            int a[4] = {0, 0, 0, 0};
            int maxIndex(-1);
            for (int i = 0; i < tmpBase.size(); ++i) {
                if (tmpBase[i] == '.' || tmpBase[i] == ',') continue;
                if (tmpBase[i] == 'A' || tmpBase[i] == 'a') a[0]++;
                else if (tmpBase[i] == 'C' || tmpBase[i] == 'c') a[1]++;
                else if (tmpBase[i] == 'T' || tmpBase[i] == 't') a[2]++;
                else if (tmpBase[i] == 'G' || tmpBase[i] == 'g') a[3]++;
                maxIndex = 0;
            }
            if (maxIndex == -1) return 0;
            for (int j = 0; j < 4; ++j) {
                if (a[j] > a[maxIndex]) maxIndex = j;
            }
            return Base[maxIndex];
        }

        // P(observed base | genotype, alt allele, sequencing-error state).
        // genotype: 0 = hom-ref, 1 = het, 2 = hom-alt.
        inline double getConditionalBaseLK(char base, int genotype, char altBase, bool is_error) {
            if (!is_error) {
                if (genotype == 0) {
                    if (base == '.' || base == ',') {
                        return 1;
                    } else
                        return 0;
                } else if (genotype == 1) {
                    if (base == '.' || base == ',') {
                        return 0.5;
                    } else if (toupper(base) == toupper(altBase)) {
                        return 0.5;
                    } else
                        return 0;
                } else if (genotype == 2) {
                    if (toupper(base) == toupper(altBase)) {
                        return 1;
                    } else
                        return 0;
                } else {
                    std::cerr << "genotype error!" << std::endl;
                    exit(EXIT_FAILURE);
                }
            } else {
                if (genotype == 0) {
                    if (base == '.' || base == ',') {
                        return 0;
                    } else if (toupper(base) == toupper(altBase)) {
                        return 1. / 3.;
                    } else
                        return 2. / 3.;
                } else if (genotype == 1) {
                    if (base == '.' || base == ',') {
                        return 1. / 6.;
                    } else if (toupper(base) == toupper(altBase)) {
                        return 1. / 6.;
                    } else
                        return 2. / 3.;
                } else if (genotype == 2) {
                    if (base == '.' || base == ',') {
                        return 1. / 3.;
                    }
                    if (toupper(base) == toupper(altBase)) {
                        return 0;
                    } else
                        return 2. / 3.;
                } else {
                    std::cerr << "genotype error!" << std::endl;
                    exit(EXIT_FAILURE);
                }
            }
        }

        // Hardy-Weinberg genotype frequencies from allele frequency AF,
        // clamped to [min_af, max_af].
        void InitialGF(double AF, double *GF) const {
            if (AF < min_af) AF = min_af;
            if (AF > max_af) AF = max_af;
            GF[0] = (1 - AF) * (1 - AF);
            GF[1] = 2 * (AF) * (1 - AF);
            GF[2] = AF * AF;
        }

        // Sum over markers of log P(pileup | PCs of both samples, alpha).
        // Marker AFs come either from the known-AF table or from the
        // SVD model AF = (UD * PC + mean) / 2.
        inline double
        ComputeMixLLKs(const std::vector<double> &tPC1, const std::vector<double> &tPC2, const double alpha) {
            double sumLLK(0);
#ifdef _OPENMP
            omp_set_num_threads(ptr->numThread);
#pragma omp parallel for reduction (+:sumLLK)
#endif
            for (size_t i = 0; i < ptr->NumMarker; ++i) {
                std::string chr = ptr->PosVec[i].first;
                int pos = ptr->PosVec[i].second;
                if (ptr->viewer.posIndex.find(chr) == ptr->viewer.posIndex.end()) {
                    continue;
                } else if (ptr->viewer.posIndex[chr].find(pos) == ptr->viewer.posIndex[chr].end()) {
                    continue;
                }
                std::vector<char> tmpBase = ptr->viewer.GetBaseInfoAt(chr, pos);
                std::vector<char> tmpQual = ptr->viewer.GetQualInfoAt(chr, pos);
                if (tmpBase.size() == 0) continue;
                // skip markers with outlier depth (mean +/- 3 sd) unless disabled
                if (not ptr->isSanityCheckDisabled and
                    (tmpBase.size() < (ptr->viewer.avgDepth - 3 * ptr->viewer.sdDepth) or
                     tmpBase.size() > (ptr->viewer.avgDepth + 3 * ptr->viewer.sdDepth)))
                    continue;
                if (ptr->isAFknown) {
                    ptr->AFs[i] = ptr->AF2s[i] = ptr->knownAF[chr][pos];
                } else {
                    ptr->AFs[i] = 0.;
                    for (int k = 0; k < tPC1.size(); ++k) {
                        ptr->AFs[i] += ptr->UD[i][k] * tPC1[k];
                    }
                    ptr->AFs[i] += ptr->means[i];
                    ptr->AFs[i] /= 2.0;
                    ptr->AF2s[i] = 0.;
                    for (int k = 0; k < tPC2.size(); ++k) {
                        ptr->AF2s[i] += ptr->UD[i][k] * tPC2[k];
                    }
                    ptr->AF2s[i] += ptr->means[i];
                    ptr->AF2s[i] /= 2.0;
                }
                double markerLK(0);
                double GF[3];
                double GF2[3];
                InitialGF(ptr->AFs[i], GF);
                InitialGF(ptr->AF2s[i], GF2);
                char altBase = ptr->ChooseBed[chr][pos].second;
                // marginalize over both samples' genotypes
                for (int geno1 = 0; geno1 < 3; ++geno1)
                    for (int geno2 = 0; geno2 < 3; ++geno2) {
                        double baseLK(0);
                        for (int j = 0; j < tmpBase.size(); ++j) {
                            baseLK += log((alpha * getConditionalBaseLK(tmpBase[j], geno1, altBase, 1) +
                                           (1. - alpha) * getConditionalBaseLK(tmpBase[j], geno2, altBase, 1)) *
                                          Phred(tmpQual[j] - 33)
                                          + (alpha * getConditionalBaseLK(tmpBase[j], geno1, altBase, 0) +
                                             (1. - alpha) * getConditionalBaseLK(tmpBase[j], geno2, altBase, 0)) *
                                            (1 - Phred(tmpQual[j] - 33)));
                        }
                        markerLK += exp(baseLK) * GF[geno1] * GF2[geno2];
                    }
                if (markerLK > 0)
                    sumLLK += log(markerLK);
            }
            return sumLLK;
        }

        // Seed the optimizer: record the fixed-parameter likelihood and
        // reset the estimator's starting point.
        int Initialize() {
            globalPC = fixPC = globalPC2 = fixPC2 = ptr->PC[1];//only the intended sample has pre-defined PCs
            globalAlpha = fixAlpha = ptr->alpha;
            llk1 = (0 - ComputeMixLLKs(fixPC, fixPC2, fixAlpha));
            for (int k = 0; k < ptr->numPC; ++k) {
                //ptr->PC[0][k] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX);
                ptr->PC[0][k] = 0.01;
            }
            for (int k = 0; k < ptr->numPC; ++k) {
                //ptr->PC[1][k] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX);
                ptr->PC[1][k] = 0.01;
            }
            //ptr->alpha = fabs(static_cast <double> (rand()) / static_cast <double> (RAND_MAX));
            ptr->alpha = 0.03;
            return 0;
        }

        // Null-model likelihood: no contamination (alpha = 0).
        int CalculateLLK0() {
            llk0 = (0 - ComputeMixLLKs(globalPC, globalPC, 0));
            return 0;
        }

        // Objective evaluated by the minimizer. The layout of v depends
        // on the optimization mode: [alpha] | [PC...] | [PC..., alpha] |
        // [PC1..., PC2...] | [PC1..., PC2..., alpha]. Tracks the best
        // (lowest) negative log-likelihood seen in llk1.
        virtual double Evaluate(Vector &v) {
            double smLLK = 0;
            if (!ptr->isHeter) {
                if (ptr->isPCFixed) {
                    double tmpAlpha = InvLogit(v[0]);
                    smLLK = 0 - ComputeMixLLKs(fixPC, fixPC2, tmpAlpha);
                    if (smLLK < llk1) {
                        llk1 = smLLK;
                        globalAlpha = tmpAlpha;
                    }
                } else if (ptr->isAlphaFixed) {
                    std::vector<double> tmpPC(ptr->numPC, 0.);
                    for (int i = 0; i < ptr->numPC; ++i) {
                        tmpPC[i] = v[i];
                    }
                    smLLK = 0 - ComputeMixLLKs(tmpPC, tmpPC, fixAlpha);
                    if (smLLK < llk1) {
                        llk1 = smLLK;
                        globalPC = tmpPC;
                        globalPC2 = tmpPC;
                    }
                } else {
                    std::vector<double> tmpPC(ptr->numPC, 0.);
                    for (int i = 0; i < ptr->numPC; ++i) {
                        tmpPC[i] = v[i];
                    }
                    double tmpAlpha = InvLogit(v[ptr->numPC]);
                    smLLK = 0 - ComputeMixLLKs(tmpPC, tmpPC, tmpAlpha);
                    if (smLLK < llk1) {
                        llk1 = smLLK;
                        globalPC = tmpPC;
                        globalPC2 = tmpPC;
                        globalAlpha = tmpAlpha;
                    }
                }
            } else//contamination source from different population
            {
                if (ptr->isPCFixed) {//only fixed for intended sample
                    std::vector<double> tmpPC(ptr->numPC, 0.);
                    for (int i = 0; i < ptr->numPC; ++i) {
                        tmpPC[i] = v[i];
                    }
                    double tmpAlpha = InvLogit(v[ptr->numPC]);
                    smLLK = 0 - ComputeMixLLKs(tmpPC, fixPC2, tmpAlpha);
                    if (smLLK < llk1) {
                        llk1 = smLLK;
                        globalPC = tmpPC;
                        globalAlpha = tmpAlpha;
                    }
                } else if (ptr->isAlphaFixed) {
                    std::vector<double> tmpPC(ptr->numPC, 0.);
                    std::vector<double> tmpPC2(ptr->numPC, 0.);
                    for (int k = 0; k < v.Length(); ++k) {
                        if (k < ptr->numPC)
                            tmpPC[k] = v[k];
                        else if (k < ptr->numPC * 2)
                            tmpPC2[k - (ptr->numPC)] = v[k];
                        else {
                            error("Simplex Vector dimension error!");
                            exit(EXIT_FAILURE);
                        }
                    }
                    smLLK = 0 - ComputeMixLLKs(tmpPC, tmpPC2, fixAlpha);
                    if (smLLK < llk1) {
                        llk1 = smLLK;
                        globalPC = tmpPC;
                        globalPC2 = tmpPC2;
                    }
                } else {
                    std::vector<double> tmpPC(ptr->numPC, 0.);
                    std::vector<double> tmpPC2(ptr->numPC, 0.);
                    double tmpAlpha(0.);
                    for (int k = 0; k < v.Length(); ++k) {
                        if (k < ptr->numPC)
                            tmpPC[k] = v[k];
                        else if (k < ptr->numPC * 2)
                            tmpPC2[k - (ptr->numPC)] = v[k];
                        else if (k == ptr->numPC * 2)
                            tmpAlpha = InvLogit(v[k]);
                        else {
                            error("Simplex Vector dimension error!");
                            exit(EXIT_FAILURE);
                        }
                    }
                    smLLK = (0 - ComputeMixLLKs(tmpPC, tmpPC2, tmpAlpha));
                    if (smLLK < llk1) {
                        llk1 = smLLK;
                        globalPC = tmpPC;
                        globalPC2 = tmpPC2;
                        globalAlpha = tmpAlpha;
                    }
                }
            }
            // NOTE(review): this print indexes [0] and [1] -- assumes
            // numPC >= 2; confirm for single-PC configurations.
            if (ptr->verbose)
                std::cerr << "globalPC:" << globalPC[0] << "\tglobalPC:" << globalPC[1]
                          << "\tglobalPC2:" << globalPC2[0] << "\tglobalPC2:" << globalPC2[1]
                          << "\tglobalAlpha:" << globalAlpha << "\tllk:" << llk1 << std::endl;
            return smLLK;
        }
    };

    SimplePileupViewer viewer;
    uint32_t NumMarker;
    FullLLKFunc fn;
    std::unordered_map<std::string, std::unordered_map<uint32_t, double> > knownAF;
    double alpha;//input alpha
    std::vector<std::vector<PCtype> > UD;//input UD
    std::vector<std::vector<PCtype> > PC;//input PC
    std::vector<PCtype> means;
    ////
    std::vector<PCtype> muv;
    std::vector<PCtype> sdv;
    ////
    std::vector<double> AFs;
    std::vector<double> AF2s;

    typedef std::unordered_map<std::string, std::unordered_map<int, std::pair<char, char> > > BED;
    BED ChooseBed;//pos is 1-based
    std::vector<region_t> BedVec;//serialized BED info, convenient for bam reading
    std::vector<std::pair<std::string, int> > PosVec;

    ContaminationEstimator();

    ContaminationEstimator(int nPC, const char *bedFile, int nThread, double ep);

    /*Initialize from existed UD*/
    /*This assumes the markers are the same as the selected vcf*/
    /*ContaminationEstimator(const std::string &UDpath, const std::string &PCpath, const std::string &Mean,
                           const std::string &pileup, const std::string &GLpath, const std::string &Bed);
    */
    int ReadMatrixUD(const std::string &path);

    int ReadMatrixPC(const std::string &path);

    /*Intersect marker sites*/
    /*
    int ReadMatrixGL(const std::string &path);
    */
    int ReadChooseBed(const std::string &path);

    int ReadMean(const std::string &path);

    int ReadAF(const std::string &path);

    int ReadBam(const char *bamFile, const char *faiFile, const char *bedFile);

    int ReadPileup(const std::string &pileupFile);

    bool IsSanityCheckOK();

    /*
    int CheckMarkerSetConsistency();
    int FormatMarkerIntersection();
    */
    /*Optimize*/
    int OptimizeLLK(const std::string &OutputPrefix);

    ~ContaminationEstimator();

    /*
    int RunFromVCF(const std::string VcfSiteAFFile, const std::string CurrentMPU, const std::string ReadGroup,
                   const std::string Prefix);
    int RunFromSVDMatrix(const std::string UDpath, const std::string PCpath, const std::string Mean,
                         const std::string &MPUpath, const std::string &Bed, const std::string &Prefix,
                         const std::string &ReadGroup);
    */
    int ReadSVDMatrix(const std::string &UDpath, const std::string &PCpath, const std::string &Mean);
    /*
    int FromBamtoPileup();
    */

    bool OptimizeHomoFixedPC(AmoebaMinimizer &myMinimizer);

    bool OptimizeHomoFixedAlpha(AmoebaMinimizer &myMinimizer);

    bool OptimizeHomo(AmoebaMinimizer &myMinimizer);

    bool OptimizeHeterFixedPC(AmoebaMinimizer &myMinimizer);

    bool OptimizeHeterFixedAlpha(AmoebaMinimizer &myMinimizer);

    bool OptimizeHeter(AmoebaMinimizer &myMinimizer);
};
#endif /* CONTAMINATIONESTIMATOR_H_ */
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2009 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel.h"
#include "magick/option.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/string_.h"
#include "magick/utility.h"
#include "magick/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
/*
Private state of a resize filter: the weighting function, the windowing
function applied over it, their support sizes, a blur (x-axis scaling)
factor, and pre-computed cubic polynomial coefficients for CubicBC().
*/
struct _ResizeFilter
{
MagickRealType
(*filter)(const MagickRealType,const ResizeFilter *),
(*window)(const MagickRealType,const ResizeFilter *),
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension to scale to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
cubic[8]; /* cubic coefficients for smooth Cubic filters */
unsigned long
signature;
};
/*
Forward declarations.
*/
static MagickRealType
I0(MagickRealType x),
BesselOrderOne(MagickRealType);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided,
%
% They are all internal to this module only. See AcquireResizeFilterInfo()
% for details of the access to these functions, via the
% GetResizeFilterSupport() and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealtype *FilterName(const MagickRealType x,
% const MagickRealType support)
%
% o x: the distance from the sampling point
% generally in the range of 0 to support
% The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: Current Filter Information
% This allows function to access support, and posibly other
% pre-calculated information defineding the functions.
%
*/
static MagickRealType Bessel(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Scaled Jinc function: BesselOrderOne(pi*x)/(2*x), with its limiting
    value pi/4 at the origin.  See Pratt "Digital Image Processing" p.97
    for Bessel functions, http://mathworld.wolfram.com/JincFunction.html,
    and page 11 of http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
  */
  if (x != 0.0)
    return(BesselOrderOne(MagickPI*x)/(2.0*x));
  return((MagickRealType) (MagickPI/4.0));
}
static MagickRealType Blackman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: second-order cosine windowing function.
  */
  const double t = (double) x;

  return(0.42+0.5*cos(MagickPI*t)+0.08*cos(2.0*MagickPI*t));
}
static MagickRealType Bohman(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: second-order cosine windowing function.
  */
  const double t = (double) x;

  return((1-x)*cos(MagickPI*t)+sin(MagickPI*t)/MagickPI);
}
static MagickRealType Box(const MagickRealType magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    A flat response; the caller clips the filter to its support window,
    so a constant 1.0 is all that is required here.
  */
  return(1.0);
}
static MagickRealType CubicBC(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  /*
    Cubic filter defined by B,C values (Mitchell and Netravali,
    "Reconstruction Filters in Computer Graphics", Computer Graphics,
    Volume 22, Number 4, August 1988):

      Mitchell-Netravali  B=1/3 C=1/3  qualitatively ideal cubic filter
      Catmull-Rom         B= 0  C=1/2  cubic interpolation function
      Cubic B-Spline      B= 1  C= 0   spline approximation of Gaussian
      Hermite             B= 0  C= 0   quadratic spline (support = 1)

    The B,C values are expanded by AcquireResizeFilter() into the eight
    coefficients of the piece-wise polynomial

      P0 + P1*x + P2*x^2 + P3*x^3   0 <= x < 1
      Q0 + Q1*x + Q2*x^2 + Q3*x^3   1 <= x <= 2

    which is continuous in both value and derivative (slope).  See
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/
    lectures/mitchell/Mitchell.pdf
  */
  const MagickRealType
    *coefficient = resize_filter->cubic;

  if (x < 1.0)
    return(coefficient[0]+x*(coefficient[1]+x*
      (coefficient[2]+x*coefficient[3])));
  if (x < 2.0)
    return(coefficient[4]+x*(coefficient[5]+x*
      (coefficient[6]+x*coefficient[7])));
  return(0.0);
}
static MagickRealType Gaussian(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Gaussian bell exp(-2*x^2), scaled by sqrt(2/pi) so its continuous
    integral is one.
  */
  const double exponent = (double) (-2.0*x*x);

  return(exp(exponent)*sqrt(2.0/MagickPI));
}
static MagickRealType Hanning(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hanning: a plain cosine windowing function.
  */
  const double t = (double) x;

  return(0.5+0.5*cos(MagickPI*t));
}
static MagickRealType Hamming(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming: an offset cosine windowing function.
  */
  const double t = (double) x;

  return(0.54+0.46*cos(MagickPI*t));
}
static MagickRealType Kaiser(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
#define Alpha 6.5
#define I0A (1.0/I0(Alpha))
  /*
    Kaiser windowing function (Bessel windowing):
    Alpha is a free value from 5 to 8 (currently hardcoded to 6.5);
    I0A normalizes the window to 1.0 at x == 0.
    Future: make alpha and the I0A pre-calculation an 'expert' setting.
  */
  return(I0A*I0(Alpha*sqrt((double) (1.0-x*x))));
  /*
    Undefine the helper macros so 'Alpha' and 'I0A' do not leak into the
    rest of the translation unit (the original left them defined).
  */
#undef I0A
#undef Alpha
}
static MagickRealType Lagrange(const MagickRealType x,
  const ResizeFilter *resize_filter)
{
  long
    piece,
    pieces;

  MagickRealType
    weight;

  register long
    k;

  /*
    Lagrange piece-wise polynomial fit of Sinc.  The number of pieces
    depends on the overall support window size of the filter: a support
    of 2 gives a lagrange-4, i.e. piece-wise cubic, function.  'piece'
    is the specific segment of the piece-wise function that x falls in.
    See Survey: Interpolation Methods, IEEE Transactions on Medical
    Imaging, Vol 18, No 11, November 1999, p1049-1075 -- Equation 27 on
    p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  pieces=(long) (2.0*resize_filter->window_support);
  piece=(long) ((1.0*pieces)/2.0+x);
  weight=1.0f;
  for (k=0; k < pieces; k++)
    if (k != piece)
      weight*=(piece-k-x)/(piece-k);
  return(weight);
}
static MagickRealType Quadratic(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Second-order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const MagickRealType offset = x-1.5;

      return(0.5*offset*offset);
    }
  return(0.0);
}
static MagickRealType Sinc(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    X-scaled Sinc: sin(pi*x)/(pi*x), with the limiting value 1.0 at the
    origin.
  */
  double
    t;

  if (x == 0.0)
    return(1.0);
  t=MagickPI*(double) x;
  return(sin(t)/t);
}
static MagickRealType Triangle(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    First-order (linear) B-Spline: bilinear interpolation, a 1D tent
    filter, or a Bartlett 2D cone filter.
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static MagickRealType Welsh(const MagickRealType x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Welsh parabolic windowing filter.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Cubic Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Bessel
%
% Windowed Sinc/Bessel Method
% Blackman Hanning Hamming
% Kaiser Lancos (Sinc)
%
% FIR filters are used as is, and are limited by that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (1.5).
%
% Requesting a windowed filter will return either a windowed Sinc, for a one
% dimentional orthogonal filtering method, such as ResizeImage(), or a
% windowed Bessel for image operations requiring a two dimentional
% cylindrical filtering method, such a DistortImage(). Which function is
% is used set by the "cylindrical" boolean argument.
%
% Directly requesting 'Sinc' or 'Bessel' will force the use of that filter
% function, with a default 'Blackman' windowing method. This not however
% recommended as it removes the correct filter selection for different
% filtering image operations. Selecting a window filtering method is better.
%
% Lanczos is purely a special case of a Sinc windowed Sinc, but defaulting to
% a 3 lobe support, rather than the default 4 lobe support.
%
% Special options can be used to override specific, or all the filter
% settings. However doing so is not advisible unless you have expert
% knowledge of the use of resampling filtered techniques. Extreme caution is
% advised.
%
% "filter:filter" Select this function as the filter.
% If a "filter:window" operation is not provided, then no windowing
% will be performed on the selected filter, (support clipped)
%
% This can be used to force the use of a windowing method as filter,
% request a 'Sinc' filter in a radially filtered operation, or the
% 'Bessel' filter for a othogonal filtered operation.
%
% "filter:window" Select this windowing function for the filter.
% While any filter could be used as a windowing function,
% using that filters first lobe over the whole support window,
% using a non-windowing method is not advisible.
%
% "filter:lobes" Number of lobes to use for the Sinc/Bessel filter.
% This is a simpler method of setting filter support size that will
% correctly handle the Sinc/Bessel switch for an operators filtering
% requirements.
%
% "filter:support" Set the support size for filtering to the size given
% This not recomented for Sinc/Bessel windowed filters, but is
% used for simple filters like FIR filters, and the Gaussian Filter.
% This will override any 'filter:lobes' option.
%
% "filter:blur" Scale the filter and support window by this amount.
% A value >1 will generally result in a more burred image with
% more ringing effects, while a value <1 will sharpen the
% resulting image with more aliasing and Morie effects.
%
% "filter:win-support" Scale windowing function to this size instead.
% This causes the windowing (or self-windowing Lagrange filter)
% to act as if the support window is much larger than what
% is actually supplied to the calling operator. The filter however
% is still clipped to the real support size given. If unset this
% will equal the normal filter support size.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic type of filter
% If only one of these are given it is assumes to be a 'Keys'
% type of filter such that B+2C=1, where Keys 'alpha' value = C
%
% "filter:verbose" Output verbose plotting data for graphing the
% resulting filter over the whole support range (with blur effect).
%
% Set a true un-windowed Sinc filter with 10 lobes (very slow)
% -set option:filter:filter Sinc
% -set option:filter:lobes 8
%
% For example force an 8 lobe Lanczos (Sinc or Bessel) filter...
% -filter Lanczos
% -set option:filter:lobes 8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterTypes filter_type, const MagickBooleanType radial,
% ExceptionInfo *exception)
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
%
% o blur: blur the filter by this amount, use 1.0 if unknown.
% Image artifact "filter:blur" will override this old usage
%
% o radial: 1D orthogonal filter (Sinc) or 2D radial filter (Bessel)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterTypes filter, const MagickRealType blur,
const MagickBooleanType cylindrical,ExceptionInfo *exception)
{
const char
*artifact;
FilterTypes
filter_type,
window_type;
long
filter_artifact;
MagickRealType
B,
C;
register ResizeFilter
*resize_filter;
/*
Table mapping a given Filter into Weighting and Windowing functions.
A 'Box' windowing function means it is a simple non-windowed filter.
A 'Sinc' filter function (must be windowed) could be upgraded to a
'Bessel' filter if a "cylindrical" filter is requested, unless a "Sinc"
filter was specifically requested.
*/
static struct
{
FilterTypes
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* undefined */
{ PointFilter, BoxFilter }, /* special, nearest-neighbour filter */
{ BoxFilter, BoxFilter }, /* Box averaging Filter */
{ TriangleFilter, BoxFilter }, /* Linear Interpolation Filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFilter, HanningFilter }, /* Hanning -- Cosine-Sinc */
{ SincFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFilter, BlackmanFilter }, /* Blackman -- 2*Cosine-Sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian Blurring filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approximation */
{ CubicFilter, BoxFilter }, /* Cubic Gaussian approximation */
{ CatromFilter, BoxFilter }, /* Cubic Interpolator */
{ MitchellFilter, BoxFilter }, /* 'ideal' Cubic Filter */
{ LanczosFilter, SincFilter }, /* Special, 3 lobed Sinc-Sinc */
{ BesselFilter, BlackmanFilter }, /* 3 lobed bessel -specific request */
{ SincFilter, BlackmanFilter }, /* 4 lobed sinc - specific request */
{ SincFilter, KaiserFilter }, /* Kaiser -- SqRoot-Sinc */
{ SincFilter, WelshFilter }, /* Welsh -- Parabolic-Sinc */
{ SincFilter, CubicFilter }, /* Parzen -- Cubic-Sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing filter */
{ SincFilter, BohmanFilter }, /* Bohman -- 2*Cosine-Sinc */
{ SincFilter, TriangleFilter } /* Bartlett -- Triangle-Sinc */
};
/*
Table mapping the filter/window function from the above table to the actual
filter/window function call to use. The default support size for that
filter as a weighting function, and the point to scale when that function
is used as a windowing function (typically 1.0).
*/
static struct
{
MagickRealType
(*function)(const MagickRealType, const ResizeFilter*),
support, /* default support size for function as a filter */
scale, /* size windowing function, for scaling windowing function */
B,
C; /* Cubic Filter factors for a CubicBC function, else ignored */
} const filters[SentinelFilter] =
{
{ Box, 0.0f, 0.5f, 0.0f, 0.0f }, /* Undefined */
{ Box, 0.0f, 0.5f, 0.0f, 0.0f }, /* Point */
{ Box, 0.5f, 0.5f, 0.0f, 0.0f }, /* Box */
{ Triangle, 1.0f, 1.0f, 0.0f, 0.0f }, /* Triangle */
{ CubicBC, 1.0f, 1.0f, 0.0f, 0.0f }, /* Hermite, Cubic B=C=0 */
{ Hanning, 1.0f, 1.0f, 0.0f, 0.0f }, /* Hanning, Cosine window */
{ Hamming, 1.0f, 1.0f, 0.0f, 0.0f }, /* Hamming, '' variation */
{ Blackman, 1.0f, 1.0f, 0.0f, 0.0f }, /* Blackman, 2*cos window */
{ Gaussian, 1.5f, 1.5f, 0.0f, 0.0f }, /* Gaussian */
{ Quadratic, 1.5f, 1.5f, 0.0f, 0.0f }, /* Quadratic Gaussian */
{ CubicBC, 2.0f, 2.0f, 1.0f, 0.0f }, /* B-Spline of Gaussian B=1 C=0 */
{ CubicBC, 2.0f, 1.0f, 0.0f, 0.5f }, /* Catmull-Rom B=0 C=1/2 */
{ CubicBC, 2.0f, 1.0f, 1.0f/3.0f, 1.0f/3.0f }, /* Mitchel B=C=1/3 */
{ Sinc, 3.0f, 1.0f, 0.0f, 0.0f }, /* Lanczos, 3 lobed Sinc-Sinc */
{ Bessel, 3.2383f,1.2197f,.0f,.0f }, /* 3 lobed Blackman-Bessel */
{ Sinc, 4.0f, 1.0f, 0.0f, 0.0f }, /* 4 lobed Blackman-Sinc */
{ Kaiser, 1.0f, 1.0f, 0.0f, 0.0f }, /* Kaiser, sq-root windowing */
{ Welsh, 1.0f, 1.0f, 0.0f, 0.0f }, /* Welsh, Parabolic windowing */
{ CubicBC, 2.0f, 2.0f, 1.0f, 0.0f }, /* Parzen, B-Spline windowing */
{ Lagrange, 2.0f, 1.0f, 0.0f, 0.0f }, /* Lagrangian Filter */
{ Bohman, 1.0f, 1.0f, 0.0f, 0.0f }, /* Bohman, 2*Cosine windowing */
{ Triangle, 1.0f, 1.0f, 0.0f, 0.0f } /* Bartlett, Triangle windowing */
};
/*
The known zero crossings of the Bessel() or the Jinc(x*PI) function.
Found by using
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
for Jv-function with v=1, then dividing X-roots by PI (tabled below)
*/
static MagickRealType
bessel_zeros[16] =
{
1.21966989126651f,
2.23313059438153f,
3.23831548416624f,
4.24106286379607f,
5.24276437687019f,
6.24392168986449f,
7.24475986871996f,
8.24539491395205f,
9.24589268494948f,
10.2462933487549f,
11.2466227948779f,
12.2468984611381f,
13.2471325221811f,
14.2473337358069f,
15.2475085630373f,
16.247661874701f
};
/* Validate the caller supplied arguments before allocating anything. */
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
if (resize_filter == (ResizeFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* defaults for the requested filter */
filter_type = mapping[filter].filter;
window_type = mapping[filter].window;
/* Filter blur -- scaling both filter and support window */
resize_filter->blur = blur;
/* NOTE(review): atof() yields 0.0 on unparsable input; the MagickEpsilon
clamp below turns that into the minimum usable blur. */
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur = atof(artifact);
if ( resize_filter->blur < MagickEpsilon )
resize_filter->blur = (MagickRealType) MagickEpsilon;
/* Modifications for Cylindrical filter use */
if ( cylindrical != MagickFalse && filter != SincFilter ) {
/* promote 1D Sinc Filter to a 2D Bessel filter */
if ( filter_type == SincFilter )
filter_type = BesselFilter;
/* Promote Lanczos (Sinc-Sinc) to Lanczos (Bessel-Bessel) */
else if ( filter_type == LanczosFilter ) {
filter_type = BesselFilter;
window_type = BesselFilter;
}
/* Blur other filters appropriately to correct cylindrical usage */
else if ( filter_type == GaussianFilter )
/* Gaussian is scaled by 4*ln(2) and not 4*sqrt(2/MagickPI)
- according to Paul Heckbert's paper on EWA resampling */
resize_filter->blur *= 2.0*log(2.0)/sqrt(2.0/MagickPI);
else if ( filter_type != BesselFilter )
/* filters with a 1.0 zero root crossing by the first bessel_zero */
resize_filter->blur *= bessel_zeros[0];
}
/* Override Filter Selection */
artifact=GetImageArtifact(image,"filter:filter");
if (artifact != (const char *) NULL) {
/* raw filter request - no window function */
filter_artifact=ParseMagickOption(MagickFilterOptions,
MagickFalse,artifact);
if ( UndefinedFilter < filter_artifact &&
filter_artifact < SentinelFilter ) {
filter_type = (FilterTypes) filter_artifact;
window_type = BoxFilter;
}
/* Lanczos is not a real filter but a self windowing Sinc/Bessel */
if ( filter_artifact == LanczosFilter ) {
filter_type = (cylindrical!=MagickFalse) ? BesselFilter : LanczosFilter;
window_type = (cylindrical!=MagickFalse) ? BesselFilter : SincFilter;
}
/* Filter override with a specific window function? */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL) {
filter_artifact=ParseMagickOption(MagickFilterOptions,
MagickFalse,artifact);
if ( UndefinedFilter < filter_artifact &&
filter_artifact < SentinelFilter ) {
if ( filter_artifact != LanczosFilter )
window_type = (FilterTypes) filter_artifact;
else
window_type = (cylindrical!=MagickFalse) ? BesselFilter : SincFilter;
}
}
}
else {
/* window specified, but no filter function? Assume Sinc/Bessel */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL) {
filter_artifact=ParseMagickOption(MagickFilterOptions,MagickFalse,
artifact);
if ( UndefinedFilter < filter_artifact &&
filter_artifact < SentinelFilter ) {
filter_type = (cylindrical!=MagickFalse) ? BesselFilter : SincFilter;
if ( filter_artifact != LanczosFilter )
window_type = (FilterTypes) filter_artifact;
else
window_type = filter_type;
}
}
}
/* Commit the function pointers and sizes selected above. */
resize_filter->filter = filters[filter_type].function;
resize_filter->support = filters[filter_type].support;
resize_filter->window = filters[window_type].function;
resize_filter->scale = filters[window_type].scale;
resize_filter->signature=MagickSignature;
/* Filter support overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL) {
/* lobes is clamped to [1,16]: bessel_zeros has only 16 entries */
long lobes = atol(artifact);
if ( lobes < 1 ) lobes = 1;
resize_filter->support = (MagickRealType) lobes;
if ( filter_type == BesselFilter ) {
if ( lobes > 16 ) lobes = 16;
resize_filter->support = bessel_zeros[lobes-1];
}
}
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support = fabs(atof(artifact));
/* Scale windowing function separately to the support 'clipping' window
that calling operator is planning to actually use. - Expert Use Only
*/
resize_filter->window_support = resize_filter->support;
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support = fabs(atof(artifact));
/* Set Cubic Spline B,C values, calculate Cubic coefficients */
B=0.0;
C=0.0;
if ( filters[filter_type].function == CubicBC
|| filters[window_type].function == CubicBC ) {
if ( filters[filter_type].function == CubicBC ) {
B=filters[filter_type].B;
C=filters[filter_type].C;
}
else if ( filters[window_type].function == CubicBC ) {
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL) {
B=atof(artifact);
C=1.0-2.0*B; /* Calculate C as if it is a Keys cubic filter */
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
C=atof(artifact);
}
else {
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL) {
C=atof(artifact);
B=(1.0-C)/2.0; /* Calculate B as if it is a Keys cubic filter */
}
}
/* Convert B,C values into Cubic Coefficients - See CubicBC() */
resize_filter->cubic[0]=( 6.0 -2.0*B )/6.0;
resize_filter->cubic[1]=0.0;
resize_filter->cubic[2]=(-18.0+12.0*B+ 6.0*C)/6.0;
resize_filter->cubic[3]=( 12.0- 9.0*B- 6.0*C)/6.0;
resize_filter->cubic[4]=( 8.0*B+24.0*C)/6.0;
resize_filter->cubic[5]=( -12.0*B-48.0*C)/6.0;
resize_filter->cubic[6]=( 6.0*B+30.0*C)/6.0;
resize_filter->cubic[7]=( - 1.0*B- 6.0*C)/6.0;
}
artifact=GetImageArtifact(image,"filter:verbose");
if (artifact != (const char *) NULL)
{
double
support,
x;
/*
Output filter graph -- for graphing filter result.
*/
support=GetResizeFilterSupport(resize_filter);
(void) printf("# support = %lg\n",support);
for (x=0.0; x <= support; x+=0.01f)
(void) printf("%5.2lf\t%lf\n",x,GetResizeFilterWeight(resize_filter,x));
(void) printf("%5.2lf\t%lf\n",support,0.0);
}
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,
% const unsigned long columns,const unsigned long rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
const unsigned long columns,const unsigned long rows,ExceptionInfo *exception)
{
#define AdaptiveResizeImageTag "Resize/Image"
Image
*resize_image;
long
y;
MagickBooleanType
proceed;
MagickPixelPacket
pixel;
PointInfo
offset;
register IndexPacket
*resize_indexes;
register long
x;
register PixelPacket
*q;
ResampleFilter
*resample_filter;
ViewInfo
*resize_view;
/*
Adaptively resize image: resample the source image at the scaled
coordinates of every destination pixel.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* degenerate geometry: nothing to resize */
if ((columns == 0) || (rows == 0))
return((Image *) NULL);
/* identity geometry: a plain clone suffices */
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(resize_image,DirectClass) == MagickFalse)
{
InheritException(exception,&resize_image->exception);
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
GetMagickPixelPacket(image,&pixel);
resample_filter=AcquireResampleFilter(image,exception);
/* fall back to mesh interpolation when the image specifies none */
if (image->interpolate == UndefinedInterpolatePixel)
(void) SetResampleFilterInterpolateMethod(resample_filter,
MeshInterpolatePixel);
resize_view=AcquireCacheView(resize_image);
for (y=0; y < (long) resize_image->rows; y++)
{
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
/* map the destination row/column back into source coordinates */
offset.y=((MagickRealType) y*image->rows/resize_image->rows);
for (x=0; x < (long) resize_image->columns; x++)
{
offset.x=((MagickRealType) x*image->columns/resize_image->columns);
(void) ResamplePixelColor(resample_filter,offset.x-0.5,offset.y-0.5,
&pixel);
SetPixelPacket(resize_image,&pixel,q,resize_indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
break;
/* NOTE(review): progress is reported against image->rows while the loop
runs over resize_image->rows -- confirm this denominator is intended */
proceed=SetImageProgress(image,AdaptiveResizeImageTag,y,image->rows);
if (proceed == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
resize_view=DestroyCacheView(resize_view);
return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 0:
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*j1(x);
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% MagickRealType BesselOrderOne(MagickRealType x)
%
% A description of each parameter follows:
%
% o x: MagickRealType value.
%
*/
#undef I0
static MagickRealType I0(MagickRealType x)
{
  MagickRealType
    result,
    term,
    y;

  register long
    k;

  /*
    Modified Bessel function of the first kind, order zero (as used by
    the Kaiser window): the power series in y = x*x/4 is summed until a
    term drops below MagickEpsilon.
  */
  result=1.0;
  y=x*x/4.0;
  term=y;
  for (k=2; term > MagickEpsilon; k++)
  {
    result+=term;
    term*=y/((MagickRealType) k*k);
  }
  return(result);
}
#undef J1
static MagickRealType J1(MagickRealType x)
{
  MagickRealType
    numerator,
    denominator;

  register long
    i;

  /*
    Rational approximation used by BesselOrderOne() for small arguments:
    both polynomials are evaluated in x*x by Horner's rule, highest
    coefficient first.
  */
  static const double
    Pone[] =
    {
    0.581199354001606143928050809e+21,
    -0.6672106568924916298020941484e+20,
    0.2316433580634002297931815435e+19,
    -0.3588817569910106050743641413e+17,
    0.2908795263834775409737601689e+15,
    -0.1322983480332126453125473247e+13,
    0.3413234182301700539091292655e+10,
    -0.4695753530642995859767162166e+7,
    0.270112271089232341485679099e+4
    },
    Qone[] =
    {
    0.11623987080032122878585294e+22,
    0.1185770712190320999837113348e+20,
    0.6092061398917521746105196863e+17,
    0.2081661221307607351240184229e+15,
    0.5243710262167649715406728642e+12,
    0.1013863514358673989967045588e+10,
    0.1501793594998585505921097578e+7,
    0.1606931573481487801970916749e+4,
    0.1e+1
    };

  numerator=Pone[8];
  denominator=Qone[8];
  for (i=7; i >= 0; i--)
  {
    numerator=numerator*x*x+Pone[i];
    denominator=denominator*x*x+Qone[i];
  }
  return(numerator/denominator);
}
#undef P1
static MagickRealType P1(MagickRealType x)
{
  MagickRealType
    numerator,
    denominator;

  register long
    i;

  /*
    Rational approximation used by BesselOrderOne() for the asymptotic
    expansion (x >= 8): both polynomials are evaluated in (8/x)^2 by
    Horner's rule, highest coefficient first.
  */
  static const double
    Pone[] =
    {
    0.352246649133679798341724373e+5,
    0.62758845247161281269005675e+5,
    0.313539631109159574238669888e+5,
    0.49854832060594338434500455e+4,
    0.2111529182853962382105718e+3,
    0.12571716929145341558495e+1
    },
    Qone[] =
    {
    0.352246649133679798068390431e+5,
    0.626943469593560511888833731e+5,
    0.312404063819041039923015703e+5,
    0.4930396490181088979386097e+4,
    0.2030775189134759322293574e+3,
    0.1e+1
    };

  numerator=Pone[5];
  denominator=Qone[5];
  for (i=4; i >= 0; i--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[i];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(numerator/denominator);
}
#undef Q1
static MagickRealType Q1(MagickRealType x)
{
  MagickRealType
    numerator,
    denominator;

  register long
    i;

  /*
    Companion rational approximation to P1() for the asymptotic
    expansion in BesselOrderOne() (x >= 8): evaluated in (8/x)^2 by
    Horner's rule, highest coefficient first.
  */
  static const double
    Pone[] =
    {
    0.3511751914303552822533318e+3,
    0.7210391804904475039280863e+3,
    0.4259873011654442389886993e+3,
    0.831898957673850827325226e+2,
    0.45681716295512267064405e+1,
    0.3532840052740123642735e-1
    },
    Qone[] =
    {
    0.74917374171809127714519505e+4,
    0.154141773392650970499848051e+5,
    0.91522317015169922705904727e+4,
    0.18111867005523513506724158e+4,
    0.1038187585462133728776636e+3,
    0.1e+1
    };

  numerator=Pone[5];
  denominator=Qone[5];
  for (i=4; i >= 0; i--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[i];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(numerator/denominator);
}
static MagickRealType BesselOrderOne(MagickRealType x)
{
  MagickRealType
    original,
    result;

  /*
    Bessel function of the first kind, order one.  j1(-x) = -j1(x), so
    the computation runs on |x| and the sign is restored at the end.
    For x in (0,8) the small-argument rational fit x*J1(x) is used; for
    larger x the asymptotic form with P1() and Q1() applies.
  */
  if (x == 0.0)
    return(0.0);
  original=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(original*J1(x));
  result=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (original < 0.0)
    result=(-result);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickExport ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature before releasing the memory so any stale
    pointer fails the signature asserts used throughout this module.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  resize_filter->signature=(~MagickSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% MagickRealType GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
MagickExport MagickRealType GetResizeFilterSupport(
  const ResizeFilter *resize_filter)
{
  /*
    Report the filter's support window size, enlarged by the
    filter:blur factor.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  return(resize_filter->blur*resize_filter->support);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usally lies between zero and the filters current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% MagickRealType GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const MagickRealType x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickExport MagickRealType GetResizeFilterWeight(
  const ResizeFilter *resize_filter,const MagickRealType x)
{
  MagickRealType
    weight,
    x_blurred;

  /*
    Evaluate the (possibly windowed) filter at position x: the window
    scales the weighting filter's value.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* x position with blur scaling applied */
  x_blurred=fabs(x)/resize_filter->blur;
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    weight=1.0;  /* Point/Box filter -- avoid division by zero */
  else
    {
      weight=resize_filter->scale/resize_filter->window_support;
      weight=resize_filter->window(x_blurred*weight,resize_filter);
    }
  /* weighting of the filter function at this position */
  return(weight*resize_filter->filter(x_blurred,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() is a convenience method that scales an image proportionally
% to twice its size.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: proportionally scale the image to twice its size
    using a cubic filter with no extra blur.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,2*image->columns,2*image->rows,CubicFilter,1.0,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally
% to half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: proportionally scale the image to half its size
    using a cubic filter with no extra blur.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,CubicFilter,1.0,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes an image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    source_x_resolution,
    source_y_resolution;

  Image
    *resample_image;

  unsigned long
    height,
    width;

  /*
    Choose a pixel geometry that keeps the image the same physical size when
    displayed at the requested resolution.  A source resolution of 0 is
    treated as the conventional 72 DPI.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  source_x_resolution=image->x_resolution == 0.0 ? 72.0 : image->x_resolution;
  source_y_resolution=image->y_resolution == 0.0 ? 72.0 : image->y_resolution;
  width=(unsigned long) (x_resolution*image->columns/source_x_resolution+0.5);
  height=(unsigned long) (y_resolution*image->rows/source_y_resolution+0.5);
  resample_image=ResizeImage(image,width,height,filter,blur,exception);
  if (resample_image != (Image *) NULL)
    {
      /* Record the resolution the new geometry was computed for. */
      resample_image->x_resolution=x_resolution;
      resample_image->y_resolution=y_resolution;
    }
  return(resample_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,
% const unsigned long columns,const unsigned long rows,
% const double delta_x,const double rigidity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const unsigned long columns,const unsigned long rows,
  const double delta_x,const double rigidity,ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  /*
    Rescale the image to columns x rows with seam carving (liblqr).  Small or
    degenerate geometries fall back to ZoomImage(); geometries beyond liblqr's
    2x-per-call limit are pre-resized, then carved recursively.  Returns the
    rescaled image or NULL on failure.
  */
  const char
    *map;

  guchar
    *packet;

  Image
    *rescale_image;

  int
    x,
    y;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MagickPixelPacket
    pixel;

  register IndexPacket
    *rescale_indexes;

  register PixelPacket
    *q;

  unsigned char
    *pixels;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  if ((columns <= 2) || (rows <= 2))
    return(ZoomImage(image,columns,rows,exception));
  if ((columns >= (2*image->columns)) || (rows >= (2*image->rows)))
    {
      Image
        *resize_image;

      unsigned long
        height,
        width;

      /*
        Honor liquid resize size limitations.
      */
      for (width=image->columns; columns >= (2*width-1); width*=2);
      for (height=image->rows; rows >= (2*height-1); height*=2);
      resize_image=ResizeImage(image,width,height,image->filter,image->blur,
        exception);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      rescale_image=LiquidRescaleImage(resize_image,columns,rows,delta_x,
        rigidity,exception);
      resize_image=DestroyImage(resize_image);
      return(rescale_image);
    }
  /*
    Select an export map matching the image: include the alpha channel only
    when the image actually has one (fix: these matte tests were inverted --
    the original exported "RGBA"/"CMYKA" for images WITHOUT a matte channel
    and dropped the alpha of images that had one).
  */
  map="RGB";
  if (image->matte != MagickFalse)
    map="RGBA";
  if (image->colorspace == CMYKColorspace)
    {
      map="CMYK";
      if (image->matte != MagickFalse)
        map="CMYKA";
    }
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,image->rows*
    strlen(map)*sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    return((Image *) NULL);
  status=ExportImagePixels(image,0,0,image->columns,image->rows,map,CharPixel,
    pixels,exception);
  if (status == MagickFalse)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  carver=lqr_carver_new(pixels,image->columns,image->rows,strlen(map));
  if (carver == (LqrCarver *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* NOTE(review): lqr_status is assigned but never inspected -- consider
     checking for LQR_OK; left unchanged to preserve behavior. */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,columns,rows);
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&rescale_image->exception);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  GetMagickPixelPacket(rescale_image,&pixel);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan(carver,&x,&y,&packet) != 0)
  {
    q=QueueAuthenticPixels(rescale_image,x,y,1,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    rescale_indexes=GetAuthenticIndexQueue(rescale_image);
    pixel.red=QuantumRange*(packet[0]/255.0);
    pixel.green=QuantumRange*(packet[1]/255.0);
    pixel.blue=QuantumRange*(packet[2]/255.0);
    if (image->colorspace != CMYKColorspace)
      {
        /* Fourth channel present only when the map included alpha (fix:
           test was inverted).  NOTE(review): packet holds the exported
           alpha value; confirm whether opacity should be inverted here. */
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange*(packet[3]/255.0);
      }
    else
      {
        pixel.index=QuantumRange*(packet[3]/255.0);
        if (image->matte != MagickFalse)
          pixel.opacity=QuantumRange*(packet[4]/255.0);
      }
    SetPixelPacket(rescale_image,&pixel,q,rescale_indexes);
    if (SyncAuthenticPixels(rescale_image,exception) == MagickFalse)
      break;
  }
  /*
    Relinquish resources.
  */
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
  const unsigned long magick_unused(columns),
  const unsigned long magick_unused(rows),const double magick_unused(delta_x),
  const double magick_unused(rigidity),ExceptionInfo *exception)
{
  /*
    Stub compiled when ImageMagick is built without the LQR delegate
    library: records a missing-delegate error in `exception` and returns no
    image.  All sizing parameters are intentionally unused.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","`%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using
% the given filter (see AcquireFilterInfo() ).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const unsigned long columns,
% const unsigned long rows,const FilterTypes filter,const double blur,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o blur: the blur factor where > 1 is blurry, < 1 is sharp.
% Typically set this to 1.0.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One filter contribution: the weight applied to one source pixel when
  computing a destination pixel.
*/
typedef struct _ContributionInfo
{
  /* Filter weight for the source pixel at index `pixel`. */
  MagickRealType
    weight;

  /* Source row or column index this contribution refers to. */
  long
    pixel;
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  /*
    Free each per-thread contribution array and then the table itself.
    Slots left NULL by a partially failed acquire are skipped.  Returns NULL
    for caller reassignment.
  */
  register long
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (long) GetPixelCacheMaximumThreads(); i++)
  {
    if (contribution[i] == (ContributionInfo *) NULL)
      continue;
    contribution[i]=(ContributionInfo *) RelinquishMagickMemory(
      contribution[i]);
  }
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  /*
    Allocate one ContributionInfo scratch array of `count` entries for every
    pixel-cache thread.  On any failure, everything allocated so far is
    released and NULL is returned.
  */
  ContributionInfo
    **contribution;

  register long
    i;

  unsigned long
    number_threads;

  number_threads=GetPixelCacheMaximumThreads();
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /* Zero the table first so a partial failure can be unwound safely. */
  (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (long) number_threads; i++)
    if ((contribution[i]=(ContributionInfo *) AcquireQuantumMemory(count,
         sizeof(**contribution))) == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  return(contribution);
}
static inline double MagickMax(const double x,const double y)
{
  /* Return the larger of two doubles. */
  return(x > y ? x : y);
}
static inline double MagickMin(const double x,const double y)
{
  /* Return the smaller of two doubles. */
  return(x < y ? x : y);
}
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType x_factor,
  const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"

  /*
    Apply the resize filter along the horizontal axis: each destination
    column is a weighted sum of nearby source columns.  Columns are processed
    in parallel when OpenMP is compiled in; `span` and `quantum` drive shared
    progress reporting.  Returns MagickTrue on success.
  */
  ClassType
    storage_class;

  ContributionInfo
    **contribution;

  long
    x;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ViewInfo
    *image_view,
    *resize_view;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  /* Blending pixels breaks the colormap guarantee: promote to DirectClass. */
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: reduce to point sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contribution=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contribution == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=1.0/scale;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  resize_view=AcquireCacheView(resize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (x=0; x < (long) resize_image->columns; x++)
  {
    long
      id,
      n,
      start,
      stop;

    MagickRealType
      center,
      density;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *resize_indexes;

    register long
      y;

    register PixelPacket
      *q;

    center=(MagickRealType) (x+0.5)/x_factor;
    start=(long) (MagickMax(center-support-MagickEpsilon,0.0)+0.5);
    stop=(long) (MagickMin(center+support,(double) image->columns)+0.5);
    density=0.0;
    if (status == MagickFalse)
      continue;
    id=GetPixelCacheThreadId();
    /*
      Compute filter weights for source columns [start,stop).
    */
    for (n=0; n < (stop-start); n++)
    {
      contribution[id][n].pixel=start+n;
      contribution[id][n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-center+0.5));
      density+=contribution[id][n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register long
          i;

        /*
          Normalize.
        */
        density=1.0/density;
        for (i=0; i < n; i++)
          contribution[id][i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,contribution[id][0].pixel,0,
      (unsigned long) (contribution[id][n-1].pixel-contribution[id][0].pixel+1),
      image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (y=0; y < (long) resize_image->rows; y++)
    {
      long
        j;

      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register long
        i;

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          for (i=0; i < n; i++)
          {
            j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
              (contribution[id][i].pixel-contribution[id][0].pixel);
            alpha=contribution[id][i].weight;
            pixel.red+=alpha*(p+j)->red;
            pixel.green+=alpha*(p+j)->green;
            pixel.blue+=alpha*(p+j)->blue;
            pixel.opacity+=alpha*(p+j)->opacity;
          }
          q->red=RoundToQuantum(pixel.red);
          q->green=RoundToQuantum(pixel.green);
          q->blue=RoundToQuantum(pixel.blue);
          q->opacity=RoundToQuantum(pixel.opacity);
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
                  (contribution[id][i].pixel-contribution[id][0].pixel);
                alpha=contribution[id][i].weight;
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[y]=(IndexPacket) RoundToQuantum(pixel.index);
            }
        }
      else
        {
          MagickRealType
            gamma;

          /* Alpha-weighted accumulation; gamma undoes the alpha weighting. */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
              (contribution[id][i].pixel-contribution[id][0].pixel);
            alpha=contribution[id][i].weight*QuantumScale*((MagickRealType)
              QuantumRange-(p+j)->opacity);
            pixel.red+=alpha*(p+j)->red;
            pixel.green+=alpha*(p+j)->green;
            pixel.blue+=alpha*(p+j)->blue;
            pixel.opacity+=contribution[id][i].weight*(p+j)->opacity;
            gamma+=alpha;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          q->red=RoundToQuantum(gamma*pixel.red);
          q->green=RoundToQuantum(gamma*pixel.green);
          q->blue=RoundToQuantum(gamma*pixel.blue);
          q->opacity=RoundToQuantum(pixel.opacity);
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
                  (contribution[id][i].pixel-contribution[id][0].pixel);
                alpha=contribution[id][i].weight*QuantumScale*((MagickRealType)
                  QuantumRange-(p+j)->opacity);
                /*
                  FIX: accumulate the index channel here (was `gamma+=alpha;`,
                  which left pixel.index at zero and double-counted gamma);
                  this now mirrors the CMYK matte branch of VerticalFilter.
                */
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[y]=(IndexPacket) RoundToQuantum(gamma*pixel.index);
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /* Colormapped output: pick the index of the nearest source pixel. */
          i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=y*(contribution[id][n-1].pixel-contribution[id][0].pixel+1)+
            (contribution[id][i-start].pixel-contribution[id][0].pixel);
          resize_indexes[y]=indexes[j];
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*quantum)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contribution=DestroyContributionThreadSet(contribution);
  return(status);
}
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const MagickRealType y_factor,
  const MagickSizeType span,MagickOffsetType *quantum,ExceptionInfo *exception)
{
  /*
    Apply the resize filter along the vertical axis: each destination row is
    a weighted sum of nearby source rows.  Mirror of HorizontalFilter.  Rows
    are processed in parallel when OpenMP is compiled in; `span` and
    `quantum` drive shared progress reporting.  Returns MagickTrue on
    success.
  */
  ClassType
    storage_class;

  ContributionInfo
    **contribution;

  long
    y;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  MagickRealType
    scale,
    support;

  ViewInfo
    *image_view,
    *resize_view;

  /*
    Apply filter to resize vertically from image to resize_image.
  */
  scale=MagickMax(1.0/y_factor,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  /* Blending pixels breaks the colormap guarantee: promote to DirectClass. */
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class) == MagickFalse)
    {
      InheritException(exception,&resize_image->exception);
      return(MagickFalse);
    }
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: reduce to point sampling.
      */
      support=(MagickRealType) 0.5;
      scale=1.0;
    }
  contribution=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contribution == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=1.0/scale;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireCacheView(image);
  resize_view=AcquireCacheView(resize_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (long) resize_image->rows; y++)
  {
    long
      id,
      n,
      start,
      stop;

    MagickRealType
      center,
      density;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *resize_indexes;

    register long
      x;

    register PixelPacket
      *q;

    center=(MagickRealType) (y+0.5)/y_factor;
    start=(long) (MagickMax(center-support-MagickEpsilon,0.0)+0.5);
    stop=(long) (MagickMin(center+support,(double) image->rows)+0.5);
    density=0.0;
    if (status == MagickFalse)
      continue;
    id=GetPixelCacheThreadId();
    /*
      Compute filter weights for source rows [start,stop).
    */
    for (n=0; n < (stop-start); n++)
    {
      contribution[id][n].pixel=start+n;
      contribution[id][n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((MagickRealType) (start+n)-center+0.5));
      density+=contribution[id][n].weight;
    }
    if ((density != 0.0) && (density != 1.0))
      {
        register long
          i;

        /*
          Normalize.
        */
        density=1.0/density;
        for (i=0; i < n; i++)
          contribution[id][i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,0,contribution[id][0].pixel,
      image->columns,(unsigned long) (contribution[id][n-1].pixel-
      contribution[id][0].pixel+1),exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    resize_indexes=GetCacheViewAuthenticIndexQueue(resize_view);
    for (x=0; x < (long) resize_image->columns; x++)
    {
      long
        j;

      MagickPixelPacket
        pixel;

      MagickRealType
        alpha;

      register long
        i;

      pixel=zero;
      if (image->matte == MagickFalse)
        {
          for (i=0; i < n; i++)
          {
            j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)*
              image->columns+x);
            alpha=contribution[id][i].weight;
            pixel.red+=alpha*(p+j)->red;
            pixel.green+=alpha*(p+j)->green;
            pixel.blue+=alpha*(p+j)->blue;
            pixel.opacity+=alpha*(p+j)->opacity;
          }
          q->red=RoundToQuantum(pixel.red);
          q->green=RoundToQuantum(pixel.green);
          q->blue=RoundToQuantum(pixel.blue);
          q->opacity=RoundToQuantum(pixel.opacity);
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)*
                  image->columns+x);
                alpha=contribution[id][i].weight;
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[x]=(IndexPacket) RoundToQuantum(pixel.index);
            }
        }
      else
        {
          MagickRealType
            gamma;

          /* Alpha-weighted accumulation; gamma undoes the alpha weighting. */
          gamma=0.0;
          for (i=0; i < n; i++)
          {
            j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)*
              image->columns+x);
            alpha=contribution[id][i].weight*QuantumScale*((MagickRealType)
              QuantumRange-(p+j)->opacity);
            pixel.red+=alpha*(p+j)->red;
            pixel.green+=alpha*(p+j)->green;
            pixel.blue+=alpha*(p+j)->blue;
            pixel.opacity+=contribution[id][i].weight*(p+j)->opacity;
            gamma+=alpha;
          }
          gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
          q->red=RoundToQuantum(gamma*pixel.red);
          q->green=RoundToQuantum(gamma*pixel.green);
          q->blue=RoundToQuantum(gamma*pixel.blue);
          q->opacity=RoundToQuantum(pixel.opacity);
          if ((image->colorspace == CMYKColorspace) &&
              (resize_image->colorspace == CMYKColorspace))
            {
              for (i=0; i < n; i++)
              {
                j=(long) ((contribution[id][i].pixel-contribution[id][0].pixel)*
                  image->columns+x);
                alpha=contribution[id][i].weight*QuantumScale*((MagickRealType)
                  QuantumRange-(p+j)->opacity);
                pixel.index+=alpha*indexes[j];
              }
              resize_indexes[x]=(IndexPacket) RoundToQuantum(gamma*pixel.index);
            }
        }
      if ((resize_image->storage_class == PseudoClass) &&
          (image->storage_class == PseudoClass))
        {
          /* Colormapped output: pick the index of the nearest source pixel. */
          i=(long) (MagickMin(MagickMax(center,(double) start),(double) stop-
            1.0)+0.5);
          j=(long) ((contribution[id][i-start].pixel-contribution[id][0].pixel)*
            image->columns+x);
          resize_indexes[x]=indexes[j];
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*quantum)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contribution=DestroyContributionThreadSet(contribution);
  return(status);
}
MagickExport Image *ResizeImage(const Image *image,const unsigned long columns,
  const unsigned long rows,const FilterTypes filter,const double blur,
  ExceptionInfo *exception)
{
  /*
    Scale `image` to columns x rows with the given filter and blur factor.
    The resize is done in two one-dimensional passes; the axis order is
    chosen so the intermediate image is the smaller one.  Returns the new
    image, or NULL on failure (the error is recorded in `exception`).
  */
  FilterTypes
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    quantum;

  MagickRealType
    x_factor,
    y_factor;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize filter.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter) && (blur == 1.0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  /*
    Default filter: Lanczos in general, Point for an unchanged geometry,
    Mitchell for colormapped, matte, or enlarged images.
  */
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->matte != MagickFalse) || ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,blur,MagickFalse,
    exception);
  /*
    Resize image.
  */
  quantum=0;
  if ((columns*((MagickSizeType) image->rows+rows)) >
      (rows*((MagickSizeType) image->columns+columns)))
    {
      /* Horizontal pass first: the intermediate image is smaller that way. */
      filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
      if (filter_image == (Image *) NULL)
        {
          resize_filter=DestroyResizeFilter(resize_filter);
          return((Image *) NULL);
        }
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &quantum,exception);
      resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
      if (resize_image != (Image *) NULL)
        status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
          span,&quantum,exception);
    }
  else
    {
      /* Vertical pass first. */
      filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
      if (filter_image == (Image *) NULL)
        {
          resize_filter=DestroyResizeFilter(resize_filter);
          return((Image *) NULL);
        }
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &quantum,exception);
      resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
      if (resize_image != (Image *) NULL)
        status&=HorizontalFilter(resize_filter,filter_image,resize_image,
          x_factor,span,&quantum,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  /*
    FIX: (1) combine pass results with &= instead of |= -- MagickFalse is 0,
    so OR-ing let a failed first pass be masked by a successful second pass;
    (2) also fail when the second CloneImage() returned NULL -- the original
    dereferenced resize_image below without checking it.
  */
  if ((status == MagickFalse) || (resize_image == (Image *) NULL))
    {
      if (resize_image != (Image *) NULL)
        resize_image=DestroyImage(resize_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const unsigned long columns,
% const unsigned long rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const unsigned long columns,
  const unsigned long rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  /*
    Scale `image` to columns x rows by point sampling: every destination
    pixel is copied from the nearest source pixel, so no new colors are
    introduced.  Returns the sampled image, or NULL on failure (error
    recorded in `exception`).
  */
  Image
    *sample_image;

  long
    j,
    *x_offset,
    y,
    *y_offset;

  MagickBooleanType
    proceed;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *pixels;

  register IndexPacket
    *sample_indexes;

  register long
    x;

  register PixelPacket
    *sample_pixels;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(long *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  y_offset=(long *) AcquireQuantumMemory((size_t) sample_image->rows,
    sizeof(*y_offset));
  if ((x_offset == (long *) NULL) || (y_offset == (long *) NULL))
    {
      /*
        FIX: release whichever offset buffer did get allocated; the original
        leaked it when only one of the two allocations failed.
      */
      if (x_offset != (long *) NULL)
        x_offset=(long *) RelinquishMagickMemory(x_offset);
      if (y_offset != (long *) NULL)
        y_offset=(long *) RelinquishMagickMemory(y_offset);
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Initialize pixel offsets: the nearest-neighbour source index for each
    destination column and row.
  */
  for (x=0; x < (long) sample_image->columns; x++)
    x_offset[x]=(long) (((MagickRealType) x+0.5)*image->columns/
      sample_image->columns);
  for (y=0; y < (long) sample_image->rows; y++)
    y_offset[y]=(long) (((MagickRealType) y+0.5)*image->rows/
      sample_image->rows);
  /*
    Sample each row.  `j` caches the source row currently loaded so
    consecutive destination rows that map to the same source row reuse it.
  */
  j=(-1);
  pixels=GetVirtualPixels(image,0,0,image->columns,1,exception);
  indexes=GetVirtualIndexQueue(image);
  for (y=0; y < (long) sample_image->rows; y++)
  {
    sample_pixels=QueueAuthenticPixels(sample_image,0,y,sample_image->columns,1,
      exception);
    if (sample_pixels == (PixelPacket *) NULL)
      break;
    sample_indexes=GetAuthenticIndexQueue(sample_image);
    if (j != y_offset[y])
      {
        /*
          Read a scan line.
        */
        j=y_offset[y];
        pixels=GetVirtualPixels(image,0,j,image->columns,1,exception);
        if (pixels == (const PixelPacket *) NULL)
          break;
        indexes=GetVirtualIndexQueue(image);
      }
    /*
      Sample each column.
    */
    for (x=0; x < (long) sample_image->columns; x++)
      sample_pixels[x]=pixels[x_offset[x]];
    if ((image->storage_class == PseudoClass) ||
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (long) sample_image->columns; x++)
        sample_indexes[x]=indexes[x_offset[x]];
    if (SyncAuthenticPixels(sample_image,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,SampleImageTag,y,image->rows);
    if (proceed == MagickFalse)
      break;
  }
  y_offset=(long *) RelinquishMagickMemory(y_offset);
  x_offset=(long *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const unsigned long columns,
% const unsigned long rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const unsigned long columns,
  const unsigned long rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  Image
    *scale_image;

  long
    number_rows,
    y;

  MagickBooleanType
    next_column,
    next_row,
    proceed;

  MagickPixelPacket
    pixel,
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector,
    zero;

  MagickRealType
    alpha,
    gamma;

  PointInfo
    scale,
    span;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register IndexPacket
    *scale_indexes;

  register long
    i,
    x;

  register MagickPixelPacket
    *s,
    *t;

  register PixelPacket
    *q;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&scale_image->exception);
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  When the row count is unchanged no separate scanline
    buffer is needed, so scanline aliases x_vector.
  */
  x_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*x_vector));
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
      sizeof(*scanline));
  scale_scanline=(MagickPixelPacket *) AcquireQuantumMemory((size_t)
    scale_image->columns,sizeof(*scale_scanline));
  y_vector=(MagickPixelPacket *) AcquireQuantumMemory((size_t) image->columns,
    sizeof(*y_vector));
  if ((scanline == (MagickPixelPacket *) NULL) ||
      (scale_scanline == (MagickPixelPacket *) NULL) ||
      (x_vector == (MagickPixelPacket *) NULL) ||
      (y_vector == (MagickPixelPacket *) NULL))
    {
      /*
        Release any buffer that was successfully acquired before throwing;
        the previous code leaked them on a partial allocation failure.
        Note scanline may alias x_vector, so free it only when distinct.
      */
      if ((scanline != (MagickPixelPacket *) NULL) && (scanline != x_vector))
        scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (MagickPixelPacket *) NULL)
        scale_scanline=(MagickPixelPacket *)
          RelinquishMagickMemory(scale_scanline);
      if (x_vector != (MagickPixelPacket *) NULL)
        x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
      if (y_vector != (MagickPixelPacket *) NULL)
        y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) ResetMagickMemory(y_vector,0,(size_t) image->columns*
    sizeof(*y_vector));
  GetMagickPixelPacket(image,&pixel);
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  i=0;
  for (y=0; y < (long) scale_image->rows; y++)
  {
    q=QueueAuthenticPixels(scale_image,0,y,scale_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    scale_indexes=GetAuthenticIndexQueue(scale_image);
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=GetVirtualPixels(image,0,i++,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          break;
        indexes=GetVirtualIndexQueue(image);
        for (x=0; x < (long) image->columns; x++)
        {
          x_vector[x].red=(MagickRealType) p->red;
          x_vector[x].green=(MagickRealType) p->green;
          x_vector[x].blue=(MagickRealType) p->blue;
          if (image->matte != MagickFalse)
            x_vector[x].opacity=(MagickRealType) p->opacity;
          if (indexes != (IndexPacket *) NULL)
            x_vector[x].index=(MagickRealType) indexes[x];
          p++;
        }
      }
    else
      {
        /*
          Scale Y direction: accumulate whole source rows into y_vector
          while they fit entirely inside the current output row's span.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) && (number_rows < (long) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetVirtualPixels(image,0,i++,image->columns,1,exception);
              if (p == (const PixelPacket *) NULL)
                break;
              indexes=GetVirtualIndexQueue(image);
              for (x=0; x < (long) image->columns; x++)
              {
                x_vector[x].red=(MagickRealType) p->red;
                x_vector[x].green=(MagickRealType) p->green;
                x_vector[x].blue=(MagickRealType) p->blue;
                if (image->matte != MagickFalse)
                  x_vector[x].opacity=(MagickRealType) p->opacity;
                if (indexes != (IndexPacket *) NULL)
                  x_vector[x].index=(MagickRealType) indexes[x];
                p++;
              }
              number_rows++;
            }
          for (x=0; x < (long) image->columns; x++)
          {
            y_vector[x].red+=scale.y*x_vector[x].red;
            y_vector[x].green+=scale.y*x_vector[x].green;
            y_vector[x].blue+=scale.y*x_vector[x].blue;
            if (scale_image->matte != MagickFalse)
              y_vector[x].opacity+=scale.y*x_vector[x].opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              y_vector[x].index+=scale.y*x_vector[x].index;
          }
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (long) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetVirtualPixels(image,0,i++,image->columns,1,exception);
            if (p == (const PixelPacket *) NULL)
              break;
            indexes=GetVirtualIndexQueue(image);
            for (x=0; x < (long) image->columns; x++)
            {
              x_vector[x].red=(MagickRealType) p->red;
              x_vector[x].green=(MagickRealType) p->green;
              x_vector[x].blue=(MagickRealType) p->blue;
              if (image->matte != MagickFalse)
                x_vector[x].opacity=(MagickRealType) p->opacity;
              if (indexes != (IndexPacket *) NULL)
                x_vector[x].index=(MagickRealType) indexes[x];
              p++;
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /*
          Emit the blended row: accumulated rows plus the fractional part
          (span.y) of the current source row, then reset the accumulator.
        */
        s=scanline;
        for (x=0; x < (long) image->columns; x++)
        {
          pixel.red=y_vector[x].red+span.y*x_vector[x].red;
          pixel.green=y_vector[x].green+span.y*x_vector[x].green;
          pixel.blue=y_vector[x].blue+span.y*x_vector[x].blue;
          if (image->matte != MagickFalse)
            pixel.opacity=y_vector[x].opacity+span.y*x_vector[x].opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            pixel.index=y_vector[x].index+span.y*x_vector[x].index;
          s->red=pixel.red;
          s->green=pixel.green;
          s->blue=pixel.blue;
          if (scale_image->matte != MagickFalse)
            s->opacity=pixel.opacity;
          if (scale_indexes != (IndexPacket *) NULL)
            s->index=pixel.index;
          s++;
          y_vector[x]=zero;
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        s=scanline;
        for (x=0; x < (long) scale_image->columns; x++)
        {
          q->red=RoundToQuantum(s->red);
          q->green=RoundToQuantum(s->green);
          q->blue=RoundToQuantum(s->blue);
          if (scale_image->matte != MagickFalse)
            q->opacity=RoundToQuantum(s->opacity);
          if (scale_indexes != (IndexPacket *) NULL)
            scale_indexes[x]=(IndexPacket) RoundToQuantum(s->index);
          q++;
          s++;
        }
      }
    else
      {
        /*
          Scale X direction.
        */
        pixel=zero;
        next_column=MagickFalse;
        span.x=1.0;
        s=scanline;
        t=scale_scanline;
        for (x=0; x < (long) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                pixel=zero;
                t++;
              }
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  pixel=zero;
                  next_column=MagickFalse;
                  t++;
                }
              pixel.red+=scale.x*s->red;
              pixel.green+=scale.x*s->green;
              pixel.blue+=scale.x*s->blue;
              if (scale_image->matte != MagickFalse)
                pixel.opacity+=scale.x*s->opacity;
              if (scale_indexes != (IndexPacket *) NULL)
                pixel.index+=scale.x*s->index;
              span.x-=scale.x;
            }
          s++;
        }
        if (span.x > 0)
          {
            s--;
            pixel.red+=span.x*s->red;
            pixel.green+=span.x*s->green;
            pixel.blue+=span.x*s->blue;
            if (scale_image->matte != MagickFalse)
              pixel.opacity+=span.x*s->opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              pixel.index+=span.x*s->index;
          }
        if ((next_column == MagickFalse) &&
            ((long) (t-scale_scanline) < (long) scale_image->columns))
          {
            t->red=pixel.red;
            t->green=pixel.green;
            t->blue=pixel.blue;
            if (scale_image->matte != MagickFalse)
              t->opacity=pixel.opacity;
            if (scale_indexes != (IndexPacket *) NULL)
              t->index=pixel.index;
          }
        /*
          Transfer scanline to scaled image.
        */
        t=scale_scanline;
        for (x=0; x < (long) scale_image->columns; x++)
        {
          alpha=1.0;
          if (image->matte != MagickFalse)
            alpha=(MagickRealType) (QuantumScale*(QuantumRange-t->opacity));
          gamma=1.0/(fabs((double) alpha) <= MagickEpsilon ? 1.0 : alpha);
          q->red=RoundToQuantum(gamma*t->red);
          q->green=RoundToQuantum(gamma*t->green);
          q->blue=RoundToQuantum(gamma*t->blue);
          if (scale_image->matte != MagickFalse)
            q->opacity=RoundToQuantum(t->opacity);
          if (scale_indexes != (IndexPacket *) NULL)
            scale_indexes[x]=(IndexPacket) RoundToQuantum(gamma*t->index);
          t++;
          q++;
        }
      }
    if (SyncAuthenticPixels(scale_image,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,ScaleImageTag,y,image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Free allocated memory (scanline aliases x_vector when rows match).
  */
  y_vector=(MagickPixelPacket *) RelinquishMagickMemory(y_vector);
  scale_scanline=(MagickPixelPacket *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(MagickPixelPacket *) RelinquishMagickMemory(scanline);
  x_vector=(MagickPixelPacket *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResizeFilterSupport() sets the support radius used to window the resize filter.
%
% The format of the SetResizeFilterSupport method is:
%
% void SetResizeFilterSupport(ResizeFilter *resize_filter,
% const MagickRealType support)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
% o support: the filter support radius.
%
*/
MagickExport void SetResizeFilterSupport(ResizeFilter *resize_filter,
  const MagickRealType support)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickSignature);
  /* Record the filter support radius; no validation is performed here. */
  resize_filter->support=support;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const unsigned long columns,
% const unsigned long rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,
  const unsigned long columns,const unsigned long rows,ExceptionInfo *exception)
{
  char
    value[MaxTextExtent];

  const char
    *attribute;

  Image
    *sample_image,
    *thumbnail_image;

  MagickRealType
    x_factor,
    y_factor;

  struct stat
    attributes;

  unsigned long
    version;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    For modest reductions a direct zoom is fast enough; only pre-sample
    (nearest-neighbor shrink to 5x target) for large reductions.
  */
  x_factor=(MagickRealType) columns/(MagickRealType) image->columns;
  y_factor=(MagickRealType) rows/(MagickRealType) image->rows;
  if ((x_factor*y_factor) > 0.1)
    {
      thumbnail_image=ZoomImage(image,columns,rows,exception);
      if (thumbnail_image != (Image *) NULL)
        (void) StripImage(thumbnail_image);
      return(thumbnail_image);
    }
  sample_image=SampleImage(image,5*columns,5*rows,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  thumbnail_image=ZoomImage(sample_image,columns,rows,exception);
  sample_image=DestroyImage(sample_image);
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  if (thumbnail_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  (void) StripImage(thumbnail_image);
  /*
    Attach freedesktop.org Thumbnail Managing Standard properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MaxTextExtent);
  if (strstr(image->magick_filename,"///") == (char *) NULL)
    (void) FormatMagickString(value,MaxTextExtent,"file:///%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      /*
        attributes is only defined when GetPathAttributes() succeeds; the
        previous code also read st_mtime outside this block, which used an
        uninitialized struct on failure.
      */
      (void) FormatMagickString(value,MaxTextExtent,"%ld",(long)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value);
    }
  (void) FormatMagickSize(GetBlobSize(image),value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value);
  (void) FormatMagickString(value,MaxTextExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value);
  /*
    Carry the source image comment over as the thumbnail description.
    (The previous code tested `value != NULL`, which is always true for an
    array, and stored the filename buffer instead of the comment.)
  */
  attribute=GetImageProperty(image,"comment");
  if (attribute != (const char *) NULL)
    (void) SetImageProperty(thumbnail_image,"description",attribute);
  (void) SetImageProperty(thumbnail_image,"software",
    GetMagickVersion(&version));
  (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value);
  (void) FormatMagickString(value,MaxTextExtent,"%lu",image->magick_rows);
  /* "Height" capitalized per the freedesktop thumbnail spec key names. */
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value);
  (void) FormatMagickString(value,MaxTextExtent,"%lu",
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value);
  return(thumbnail_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z o o m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZoomImage() creates a new image that is a scaled size of an existing one.
% It allocates the memory necessary for the new Image structure and returns a
% pointer to the new image. The Point filter gives fast pixel replication,
% Triangle is equivalent to bi-linear interpolation, and Mitchell gives slower,
% very high-quality results. See Graphic Gems III for details on this
% algorithm.
%
% The filter member of the Image structure specifies which image filter to
% use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
% The format of the ZoomImage method is:
%
% Image *ZoomImage(const Image *image,const unsigned long columns,
% const unsigned long rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: An integer that specifies the number of columns in the zoom
% image.
%
% o rows: An integer that specifies the number of rows in the scaled
% image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const unsigned long columns,
  const unsigned long rows,ExceptionInfo *exception)
{
  /*
    Zoom is a thin convenience wrapper around ResizeImage() that picks up
    the filter and blur factor stored on the image itself.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  return(ResizeImage(image,columns,rows,image->filter,image->blur,exception));
}
|
GB_unaryop__identity_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_bool
// op(A') function: GB_tran__identity_bool_bool
// C type: bool
// A type: bool
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary operator entrywise: Cx [p] = (bool) Ax [p].
// Each entry is independent, so the loop parallelizes with a static
// schedule across nthreads OpenMP threads.
GrB_Info GB_unop__identity_bool_bool
(
    bool *restrict Cx,          // output array, anz entries
    const bool *restrict Ax,    // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = op ((bool) Ax [p])
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op ((bool) A'): transpose A while applying the IDENTITY operator.
// The actual loop nest lives in the GB_unaryop_transpose.c template, which
// expands using the GB_* macros defined above; this wrapper only selects
// phase 2 of the two-phase transpose.
GrB_Info GB_tran__identity_bool_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,       // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
c_jacobi03.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi03.c
VERSION: 1.0
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 3: 1 PR outside the iteration loop, 4 Barriers
             Directives are used in this code to achieve parallelism.
             All do loops are parallelized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi03.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
             relax - Successive over-relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
int n, m, mits;
double tol, relax, alpha;
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/*
 * Initialize the solution grid u (all zeros) and the right-hand side f,
 * and compute the grid spacings dx, dy for the domain [-1,1]x[-1,1].
 * Assumes the exact solution is u(x,y) = (1-x^2)*(1-y^2).
 *
 * Fix: xx and yy were declared int, silently truncating the grid
 * coordinates -1.0 + dx*(i-1) toward zero; they must be double
 * (error_check() already uses double for the same quantities).
 */
void initialize(
            int n,
            int m,
            double alpha,
            double *dx,
            double *dy,
            double *u,
            double *f)
{
  int i,j;
  double xx,yy;

  *dx = 2.0 / (n-1);
  *dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS (row j, column i, row-major). */
  for (j=0; j<m; j++){
    for (i=0; i<n; i++){
      xx = -1.0 + *dx * (i-1);
      yy = -1.0 + *dy * (j-1);
      u[j*n+i] = 0.0;                                  /* U(j,i) */
      f[j*n+i] = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy) /* F(j,i) */
            - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy);
    }
  }
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/*
 * Compute and print the RMS error between the numerical solution u and
 * the exact solution (1-x^2)*(1-y^2).
 *
 * Fix: dy was recomputed as 2.0/(n-2), which disagrees with the grid
 * spacing 2.0/(m-1) used in initialize(); both spacings are recomputed
 * here (shadowing the dx/dy parameters) exactly as initialize() defines
 * them.
 */
void error_check(
            int n,
            int m,
            double alpha,
            double dx,
            double dy,
            double *u,
            double *f)
{
  int i,j;
  double xx, yy, temp, error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0;

  for (j=0; j<m; j++){
    for (i=0; i<n; i++){
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp = u[j*n+i] - (1.0 - xx*xx) * (1.0 - yy*yy);   /* U(j,i) - exact */
      error += temp*temp;
    }
  }
  error = sqrt(error)/(n*m);
  printf("Solution Error : %g\n", error);
}
/* Driver: parse the six solver arguments via the OmpSCR harness, allocate
   the grids, run the Jacobi solver under timer 0, and report elapsed time,
   MFlop rate and solution error. */
int main(int argc, char **argv){
  double *u, *f, dx, dy;
  double dt, mflops;
  int NUMTHREADS;
  char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =",
                   "Successive over-relaxation parameter =",
                   "error tolerance for iterative solver =", "Maximum iterations for solver ="};
  char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
  char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"};

  NUMTHREADS = omp_get_max_threads();
  /* Register parameters/timers with the OmpSCR benchmark harness. */
  OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacoib03' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS,
    PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
    argc, argv);
  /* Arguments are 1-indexed in the OSCR API; results land in file globals. */
  n = OSCR_getarg_int(1);
  m = OSCR_getarg_int(2);
  alpha = OSCR_getarg_double(3);
  relax = OSCR_getarg_double(4);
  tol = OSCR_getarg_double(5);
  mits = OSCR_getarg_int(6);
  printf("-> %d, %d, %g, %g, %g, %d\n",
       n, m, alpha, relax, tol, mits);
  u = (double *) OSCR_malloc(n*m*sizeof(double));
  f = (double *) OSCR_malloc(n*m*sizeof(double));
  /* arrays are allocated and initialzed */
  initialize(n, m, alpha, &dx, &dy, u, f);
  /* Solve Helmholtz eqiation */
  OSCR_timer_start(0);
  jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits);
  OSCR_timer_stop(0);
  dt = OSCR_timer_read(0);
  printf(" elapsed time : %12.6f\n", dt);
  /* 13 flops per interior stencil point per iteration. */
  mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt;
  printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt);
  error_check(n, m, alpha, dx, dy, u, f);
  OSCR_report(1, TIMERS_NAMES);
  return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
/*
 * Jacobi iteration for the Helmholtz equation on an n x m grid with
 * Dirichlet boundaries.  Iterates until the residual norm drops below
 * tol or maxit iterations have been performed.
 *
 * Fixes relative to the previous version:
 *  - the stencil/update loop was missing its row loop: `j` kept the stale
 *    value m from the copy loop, so uold[i + m*(j+1)] read out of bounds
 *    and only one (invalid) row was ever updated;
 *  - `error` was never reset at the start of an iteration, so the
 *    reduction accumulated across iterations;
 *  - `#pragma omp master` has no implied barrier, racing on k and error;
 *    `single` (implied barrier) plus an explicit barrier before the reset
 *    make the shared updates visible to all threads safely.
 */
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
        double omega, double *u, double *f, double tol, int maxit )
{
  int i,j,k;
  double error, resid, ax, ay, b;
  double *uold;

  /* For array compatibility, rows and columns are (conceptually) swapped,
     e.g. uold[column][row]; we effectively solve the mirrored problem. */
  uold = (double *)OSCR_malloc(sizeof(double) * n * m);

  ax = 1.0/(dx * dx);                     /* X-direction coef */
  ay = 1.0/(dy*dy);                       /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha;   /* Central coeff */
  error = 10.0 * tol;                     /* force at least one iteration */
  k = 1;

#pragma omp parallel private(resid, i)
  {
    while (k <= maxit && error > tol) {
      /* every thread must evaluate the loop condition with the previous
         error value before it is reset below */
#pragma omp barrier
#pragma omp single
      error = 0.0;
      /* copy new solution into old (j is private as the worksharing
         loop variable) */
#pragma omp for
      for (j=0; j<m; j++)
        for (i=0; i<n; i++)
          uold[i + m*j] = u[i + m*j];
      /* compute stencil, residual and update over interior points */
#pragma omp for reduction(+:error)
      for (j=1; j<m-1; j++){
        for (i=1; i<n-1; i++){
          resid =(
            ax * (uold[i-1 + m*j] + uold[i+1 + m*j])
            + ay * (uold[i + m*(j-1)] + uold[i + m*(j+1)])
            + b * uold[i + m*j] - f[i + m*j]
          ) / b;
          /* update solution */
          u[i + m*j] = uold[i + m*j] - omega * resid;
          /* accumulate residual error */
          error = error + resid*resid;
        }
      }
      /* single (implied barrier) so all threads see the new k and the
         final error norm before re-testing the while condition */
#pragma omp single
      {
        k++;
        error = sqrt(error) /(n*m);
      }
    } /* while */
  } /* end parallel */

  printf("Total Number of Iterations %d\n", k);
  printf("Residual %.15f\n", error);

  free(uold);
}
|
nanort.h | //
// NanoRT, single header only modern ray tracing kernel.
//
/*
The MIT License (MIT)
Copyright (c) 2015 Light Transport Entertainment, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef __NANORT_H__
#define __NANORT_H__
#include <vector>
#include <queue>
#include <cmath>
#include <limits>
#include <cstdlib>
#include <cstring>
#include <string>
namespace nanort {
// Parallelized BVH build is not yet fully tested,
// thus turn off if you face a problem when building BVH.
#define NANORT_ENABLE_PARALLEL_BUILD (0)
// Small vector class useful for multi-threaded environment.
//
// stack_container.h
//
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//#include "base/basictypes.h"
// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
// some situations.
//
// STL likes to make copies of allocators, so the allocator itself can't hold
// the data. Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data. Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
// Allocator that serves the first fitting request from a caller-owned
// stack buffer (Source) and falls back to std::allocator<T> (the heap)
// for everything else.  Copies of the allocator share the same Source.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
 public:
  typedef typename std::allocator<T>::pointer pointer;
  typedef typename std::allocator<T>::size_type size_type;
  // Backing store for the allocator. The container owner is responsible for
  // maintaining this for as long as any containers using this allocator are
  // live.
  struct Source {
    Source() : used_stack_buffer_(false) {}
    // Casts the buffer in its right type.
    T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
    const T *stack_buffer() const {
      return reinterpret_cast<const T *>(stack_buffer_);
    }
    //
    // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
    // since it is used to mimic an array of T.
    // Be careful while declaring any unaligned types (like bool)
    // before stack_buffer_.
    //
    // The buffer itself. It is not of type T because we don't want the
    // constructors and destructors to be automatically called. Define a POD
    // buffer of the right size instead.
    char stack_buffer_[sizeof(T[stack_capacity])];
    // Set when the stack buffer is used for an allocation. We do not track
    // how much of the buffer is used, only that somebody is using it.
    bool used_stack_buffer_;
  };
  // Used by containers when they want to refer to an allocator of type U.
  template <typename U> struct rebind {
    typedef StackAllocator<U, stack_capacity> other;
  };
  // For the straight up copy c-tor, we can share storage.
  StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
      : source_(rhs.source_) {}
  // ISO C++ requires the following constructor to be defined,
  // and std::vector in VC++2008SP1 Release fails with an error
  // in the class _Container_base_aux_alloc_real (from <xutility>)
  // if the constructor does not exist.
  // For this constructor, we cannot share storage; there's
  // no guarantee that the Source buffer of Ts is large enough
  // for Us.
  // TODO: If we were fancy pants, perhaps we could share storage
  // iff sizeof(T) == sizeof(U).
  template <typename U, size_t other_capacity>
  StackAllocator(const StackAllocator<U, other_capacity> &other)
      : source_(NULL) {}
  explicit StackAllocator(Source *source) : source_(source) {}
  // Actually do the allocation. Use the stack buffer if nobody has used it yet
  // and the size requested fits. Otherwise, fall through to the standard
  // allocator.
  pointer allocate(size_type n, void *hint = 0) {
    if (source_ != NULL && !source_->used_stack_buffer_ &&
        n <= stack_capacity) {
      source_->used_stack_buffer_ = true;
      return source_->stack_buffer();
    } else {
      return std::allocator<T>::allocate(n, hint);
    }
  }
  // Free: when trying to free the stack buffer, just mark it as free. For
  // non-stack-buffer pointers, just fall though to the standard allocator.
  void deallocate(pointer p, size_type n) {
    if (source_ != NULL && p == source_->stack_buffer())
      source_->used_stack_buffer_ = false;
    else
      std::allocator<T>::deallocate(p, n);
  }

 private:
  // Shared stack buffer descriptor; NULL when this allocator was rebound
  // from another type and therefore always allocates on the heap.
  Source *source_;
};
// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
template <typename TContainerType, int stack_capacity> class StackContainer {
public:
  typedef TContainerType ContainerType;
  typedef typename ContainerType::value_type ContainedType;
  typedef StackAllocator<ContainedType, stack_capacity> Allocator;
  // Allocator must be constructed before the container!
  // (Member declaration order below guarantees stack_data_ -> allocator_ ->
  // container_ construction; do not reorder.)
  StackContainer() : allocator_(&stack_data_), container_(allocator_) {
    // Make the container use the stack allocation by reserving our buffer size
    // before doing anything else.
    container_.reserve(stack_capacity);
  }
  // Getters for the actual container.
  //
  // Danger: any copies of this made using the copy constructor must have
  // shorter lifetimes than the source. The copy will share the same allocator
  // and therefore the same stack buffer as the original. Use std::copy to
  // copy into a "real" container for longer-lived objects.
  ContainerType &container() { return container_; }
  const ContainerType &container() const { return container_; }
  // Support operator-> to get to the container. This allows nicer syntax like:
  //   StackContainer<...> foo;
  //   std::sort(foo->begin(), foo->end());
  ContainerType *operator->() { return &container_; }
  const ContainerType *operator->() const { return &container_; }
#ifdef UNIT_TEST
  // Retrieves the stack source so that that unit tests can verify that the
  // buffer is being used properly.
  const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif
protected:
  typename Allocator::Source stack_data_;
  Allocator allocator_;
  ContainerType container_;
  // DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
  StackContainer(const StackContainer &);
  void operator=(const StackContainer &);
};
// StackString: std::string whose first `stack_capacity` chars live in an
// inline stack buffer (see StackContainer).
template <size_t stack_capacity>
class StackString
    : public StackContainer<
          std::basic_string<char, std::char_traits<char>,
                            StackAllocator<char, stack_capacity> >,
          stack_capacity> {
public:
  StackString()
      : StackContainer<std::basic_string<char, std::char_traits<char>,
                                         StackAllocator<char, stack_capacity> >,
                       stack_capacity>() {}
private:
  // DISALLOW_EVIL_CONSTRUCTORS(StackString);
  StackString(const StackString &);
  void operator=(const StackString &);
};
// StackWString: wide-character variant of StackString.
template <size_t stack_capacity>
class StackWString
    : public StackContainer<
          std::basic_string<wchar_t, std::char_traits<wchar_t>,
                            StackAllocator<wchar_t, stack_capacity> >,
          stack_capacity> {
public:
  StackWString()
      : StackContainer<
            std::basic_string<wchar_t, std::char_traits<wchar_t>,
                              StackAllocator<wchar_t, stack_capacity> >,
            stack_capacity>() {}
private:
  // DISALLOW_EVIL_CONSTRUCTORS(StackWString);
  StackWString(const StackWString &);
  void operator=(const StackWString &);
};
// StackVector
//
// std::vector whose first `stack_capacity` elements live in an inline stack
// buffer (see StackContainer).
//
// Example:
//   StackVector<int, 16> foo;
//   foo->push_back(22); // we have overloaded operator->
//   foo[0] = 10; // as well as operator[]
template <typename T, size_t stack_capacity>
class StackVector
    : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                            stack_capacity> {
public:
  StackVector()
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {}
  // We need to put this in STL containers sometimes, which requires a copy
  // constructor. We can't call the regular copy constructor because that will
  // take the stack buffer from the original. Here, we create an empty object
  // and make a stack buffer of its own.
  StackVector(const StackVector<T, stack_capacity> &other)
      : StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
                       stack_capacity>() {
    this->container().assign(other->begin(), other->end());
  }
  StackVector<T, stack_capacity> &
  operator=(const StackVector<T, stack_capacity> &other) {
    // Guard against self-assignment: calling assign() with this container's
    // own iterator range is undefined behavior.
    if (this != &other) {
      this->container().assign(other->begin(), other->end());
    }
    return *this;
  }
  // Vectors are commonly indexed, which isn't very convenient even with
  // operator-> (using "->at()" does exception stuff we don't want).
  T &operator[](size_t i) { return this->container().operator[](i); }
  const T &operator[](size_t i) const {
    return this->container().operator[](i);
  }
};
namespace {
// Minimal 3-component float vector used by the BVH builder/traverser.
struct float3 {
  float3() {}
  float3(float xx, float yy, float zz) {
    x = xx;
    y = yy;
    z = zz;
  }
  // Load from a pointer to 3 contiguous floats.
  float3(const float *p) {
    x = p[0];
    y = p[1];
    z = p[2];
  }
  float3 operator*(float f) const { return float3(x * f, y * f, z * f); }
  float3 operator-(const float3 &f2) const {
    return float3(x - f2.x, y - f2.y, z - f2.z);
  }
  // Component-wise product (not a dot product; see vdot()).
  float3 operator*(const float3 &f2) const {
    return float3(x * f2.x, y * f2.y, z * f2.z);
  }
  float3 operator+(const float3 &f2) const {
    return float3(x + f2.x, y + f2.y, z + f2.z);
  }
  float3 &operator+=(const float3 &f2) {
    x += f2.x;
    y += f2.y;
    z += f2.z;
    return (*this);
  }
  // Component-wise division; no zero check.
  float3 operator/(const float3 &f2) const {
    return float3(x / f2.x, y / f2.y, z / f2.z);
  }
  // Index access relies on x, y, z being laid out contiguously below.
  float operator[](int i) const { return (&x)[i]; }
  float &operator[](int i) { return (&x)[i]; }
  float3 neg() { return float3(-x, -y, -z); }
  float length() { return sqrtf(x * x + y * y + z * z); }
  // Normalize in place; leaves the vector unchanged when its length is
  // (near) zero.
  void normalize() {
    float len = length();
    if (fabsf(len) > 1.0e-6f) { // was fabs(): avoid float->double promotion
      float inv_len = 1.0f / len; // was 1.0 / len: keep math in float
      x *= inv_len;
      y *= inv_len;
      z *= inv_len;
    }
  }
  float x, y, z;
  // float pad;  // for alignment
};
// Scalar * vector (mirror of float3::operator*(float)).
inline float3 operator*(float f, const float3 &v) {
  return float3(f * v.x, f * v.y, f * v.z);
}
// Cross product a x b.
inline float3 vcross(float3 a, float3 b) {
  return float3(a.y * b.z - a.z * b.y,
                a.z * b.x - a.x * b.z,
                a.x * b.y - a.y * b.x);
}
// Dot product a . b.
inline float vdot(float3 a, float3 b) {
  return a.x * b.x + a.y * b.y + a.z * b.z;
}
} // namespace
// Result of a ray/triangle intersection query.
typedef struct {
  float t;             // hit distance along the ray (used for hit ordering)
  float u;             // presumably barycentric u of the hit -- TODO confirm
  float v;             // presumably barycentric v of the hit -- TODO confirm
  unsigned int faceID; // index of the intersected triangle
} Intersection;
// Ray description passed to BVH traversal. Callers fill org/dir; the
// traversal code fills the derived fields.
typedef struct {
  float org[3];    // must set
  float dir[3];    // must set
  float invDir[3]; // filled internally
  int dirSign[3];  // filled internally
} Ray;
// One node of the BVH. bmin/bmax is the node's axis-aligned bounding box.
class BVHNode {
public:
  BVHNode(){};
  ~BVHNode(){};
  float bmin[3];
  float bmax[3];
  int flag; // 1 = leaf node, 0 = branch node
  int axis; // split axis for branch nodes
  // leaf
  //   data[0] = npoints
  //   data[1] = index
  //
  // branch
  //   data[0] = child[0]
  //   data[1] = child[1]
  unsigned int data[2];
};
namespace {
// Orders intersections by increasing hit distance t.
class IsectComparator {
public:
  bool operator()(const Intersection &a, const Intersection &b) const {
    return a.t < b.t;
  }
};
// Stores furthest intersection at top
// (a priority_queue with "less by t" is a max-heap on t).
typedef std::priority_queue<Intersection, std::vector<Intersection>,
                            IsectComparator> IsectVector;
// Small helper for 4x4 matrices stored as m[row][col].
template <typename T> class Matrix {
public:
  // Debug-print m, one row per line.
  void Print(T m[4][4]) {
    for (int i = 0; i < 4; i++) {
      printf("m[%d] = %f, %f, %f, %f\n", i, m[i][0], m[i][1], m[i][2], m[i][3]);
    }
  }
  // Set m to the identity matrix.
  void Identity(T m[4][4]) {
    m[0][0] = 1.0;
    m[0][1] = 0.0;
    m[0][2] = 0.0;
    m[0][3] = 0.0;
    m[1][0] = 0.0;
    m[1][1] = 1.0;
    m[1][2] = 0.0;
    m[1][3] = 0.0;
    m[2][0] = 0.0;
    m[2][1] = 0.0;
    m[2][2] = 1.0;
    m[2][3] = 0.0;
    m[3][0] = 0.0;
    m[3][1] = 0.0;
    m[3][2] = 0.0;
    m[3][3] = 1.0;
  }
  // Invert m in place.
  // Cramer's-rule implementation adapted from Intel's AP-928 reference
  // ("Streaming SIMD Extensions - Inverse of 4x4 Matrix").
  // NOTE(review): divides by the determinant without a singularity check;
  // a non-invertible input yields inf/NaN entries.
  void Inverse(T m[4][4]) {
    int i, j;
    T tmp[12]; /* tmp array for pairs */
    T tsrc[16]; /* array of transpose source matrix */
    T det; /* determinant */
    /* transpose matrix */
    for (i = 0; i < 4; i++) {
      tsrc[i] = m[i][0];
      tsrc[i + 4] = m[i][1];
      tsrc[i + 8] = m[i][2];
      tsrc[i + 12] = m[i][3];
    }
    /* calculate pair for first 8 elements(cofactors) */
    tmp[0] = tsrc[10] * tsrc[15];
    tmp[1] = tsrc[11] * tsrc[14];
    tmp[2] = tsrc[9] * tsrc[15];
    tmp[3] = tsrc[11] * tsrc[13];
    tmp[4] = tsrc[9] * tsrc[14];
    tmp[5] = tsrc[10] * tsrc[13];
    tmp[6] = tsrc[8] * tsrc[15];
    tmp[7] = tsrc[11] * tsrc[12];
    tmp[8] = tsrc[8] * tsrc[14];
    tmp[9] = tsrc[10] * tsrc[12];
    tmp[10] = tsrc[8] * tsrc[13];
    tmp[11] = tsrc[9] * tsrc[12];
    /* calculate first 8 elements(cofactors) */
    m[0][0] = tmp[0] * tsrc[5] + tmp[3] * tsrc[6] + tmp[4] * tsrc[7];
    m[0][0] -= tmp[1] * tsrc[5] + tmp[2] * tsrc[6] + tmp[5] * tsrc[7];
    m[0][1] = tmp[1] * tsrc[4] + tmp[6] * tsrc[6] + tmp[9] * tsrc[7];
    m[0][1] -= tmp[0] * tsrc[4] + tmp[7] * tsrc[6] + tmp[8] * tsrc[7];
    m[0][2] = tmp[2] * tsrc[4] + tmp[7] * tsrc[5] + tmp[10] * tsrc[7];
    m[0][2] -= tmp[3] * tsrc[4] + tmp[6] * tsrc[5] + tmp[11] * tsrc[7];
    m[0][3] = tmp[5] * tsrc[4] + tmp[8] * tsrc[5] + tmp[11] * tsrc[6];
    m[0][3] -= tmp[4] * tsrc[4] + tmp[9] * tsrc[5] + tmp[10] * tsrc[6];
    m[1][0] = tmp[1] * tsrc[1] + tmp[2] * tsrc[2] + tmp[5] * tsrc[3];
    m[1][0] -= tmp[0] * tsrc[1] + tmp[3] * tsrc[2] + tmp[4] * tsrc[3];
    m[1][1] = tmp[0] * tsrc[0] + tmp[7] * tsrc[2] + tmp[8] * tsrc[3];
    m[1][1] -= tmp[1] * tsrc[0] + tmp[6] * tsrc[2] + tmp[9] * tsrc[3];
    m[1][2] = tmp[3] * tsrc[0] + tmp[6] * tsrc[1] + tmp[11] * tsrc[3];
    m[1][2] -= tmp[2] * tsrc[0] + tmp[7] * tsrc[1] + tmp[10] * tsrc[3];
    m[1][3] = tmp[4] * tsrc[0] + tmp[9] * tsrc[1] + tmp[10] * tsrc[2];
    m[1][3] -= tmp[5] * tsrc[0] + tmp[8] * tsrc[1] + tmp[11] * tsrc[2];
    /* calculate pairs for second 8 elements(cofactors) */
    tmp[0] = tsrc[2] * tsrc[7];
    tmp[1] = tsrc[3] * tsrc[6];
    tmp[2] = tsrc[1] * tsrc[7];
    tmp[3] = tsrc[3] * tsrc[5];
    tmp[4] = tsrc[1] * tsrc[6];
    tmp[5] = tsrc[2] * tsrc[5];
    tmp[6] = tsrc[0] * tsrc[7];
    tmp[7] = tsrc[3] * tsrc[4];
    tmp[8] = tsrc[0] * tsrc[6];
    tmp[9] = tsrc[2] * tsrc[4];
    tmp[10] = tsrc[0] * tsrc[5];
    tmp[11] = tsrc[1] * tsrc[4];
    /* calculate second 8 elements(cofactors) */
    m[2][0] = tmp[0] * tsrc[13] + tmp[3] * tsrc[14] + tmp[4] * tsrc[15];
    m[2][0] -= tmp[1] * tsrc[13] + tmp[2] * tsrc[14] + tmp[5] * tsrc[15];
    m[2][1] = tmp[1] * tsrc[12] + tmp[6] * tsrc[14] + tmp[9] * tsrc[15];
    m[2][1] -= tmp[0] * tsrc[12] + tmp[7] * tsrc[14] + tmp[8] * tsrc[15];
    m[2][2] = tmp[2] * tsrc[12] + tmp[7] * tsrc[13] + tmp[10] * tsrc[15];
    m[2][2] -= tmp[3] * tsrc[12] + tmp[6] * tsrc[13] + tmp[11] * tsrc[15];
    m[2][3] = tmp[5] * tsrc[12] + tmp[8] * tsrc[13] + tmp[11] * tsrc[14];
    m[2][3] -= tmp[4] * tsrc[12] + tmp[9] * tsrc[13] + tmp[10] * tsrc[14];
    m[3][0] = tmp[2] * tsrc[10] + tmp[5] * tsrc[11] + tmp[1] * tsrc[9];
    m[3][0] -= tmp[4] * tsrc[11] + tmp[0] * tsrc[9] + tmp[3] * tsrc[10];
    m[3][1] = tmp[8] * tsrc[11] + tmp[0] * tsrc[8] + tmp[7] * tsrc[10];
    m[3][1] -= tmp[6] * tsrc[10] + tmp[9] * tsrc[11] + tmp[1] * tsrc[8];
    m[3][2] = tmp[6] * tsrc[9] + tmp[11] * tsrc[11] + tmp[3] * tsrc[8];
    m[3][2] -= tmp[10] * tsrc[11] + tmp[2] * tsrc[8] + tmp[7] * tsrc[9];
    m[3][3] = tmp[10] * tsrc[10] + tmp[4] * tsrc[8] + tmp[9] * tsrc[9];
    // BUGFIX: the middle term previously read tmp[11] * tsrc[0]; the AP-928
    // reference code uses tmp[11] * tsrc[10] for this cofactor.
    m[3][3] -= tmp[8] * tsrc[9] + tmp[11] * tsrc[10] + tmp[5] * tsrc[8];
    /* calculate determinant */
    det = tsrc[0] * m[0][0] + tsrc[1] * m[0][1] + tsrc[2] * m[0][2] +
          tsrc[3] * m[0][3];
    /* calculate matrix inverse */
    det = 1.0 / det;
    for (j = 0; j < 4; j++) {
      for (i = 0; i < 4; i++) {
        m[j][i] *= det;
      }
    }
  }
  // Transpose m in place.
  void Transpose(T m[4][4]) {
    T t[4][4];
    // Transpose
    for (int j = 0; j < 4; j++) {
      for (int i = 0; i < 4; i++) {
        t[j][i] = m[i][j];
      }
    }
    // Copy
    for (int j = 0; j < 4; j++) {
      for (int i = 0; i < 4; i++) {
        m[j][i] = t[j][i];
      }
    }
  }
  // Matrix product. Note the argument order: with m[row][col] storage this
  // computes dst = m1 * m0 (dst[i][j] = sum_k m1[i][k] * m0[k][j]).
  void Mult(T dst[4][4], const T m0[4][4], const T m1[4][4]) {
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        dst[i][j] = 0;
        for (int k = 0; k < 4; ++k) {
          dst[i][j] += m0[k][j] * m1[i][k];
        }
      }
    }
  }
  // Affine point transform in row-vector convention: dst = v * m, with the
  // translation taken from row 3. Safe when dst aliases v (buffered in tmp).
  void MultV(T dst[3], const T m[4][4], const T v[3]) {
    T tmp[3];
    tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0];
    tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1];
    tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2];
    dst[0] = tmp[0];
    dst[1] = tmp[1];
    dst[2] = tmp[2];
  }
  // float3 overload of the affine point transform above.
  void MultV(float3 &dst, const T m[4][4], const float3 &v) {
    T tmp[3];
    tmp[0] = m[0][0] * v[0] + m[1][0] * v[1] + m[2][0] * v[2] + m[3][0];
    tmp[1] = m[0][1] * v[0] + m[1][1] * v[1] + m[2][1] * v[2] + m[3][1];
    tmp[2] = m[0][2] * v[0] + m[1][2] * v[1] + m[2][2] * v[2] + m[3][2];
    dst[0] = tmp[0];
    dst[1] = tmp[1];
    dst[2] = tmp[2];
  }
};
}
///< BVH build option.
struct BVHBuildOptions {
  float costTaabb;        // SAH cost of an AABB test, in [0, 1]
  int minLeafPrimitives;  // create a leaf when a range has fewer primitives
  int maxTreeDepth;       // hard recursion limit
  int binSize;            // number of SAH bins per axis
  int shallowDepth;       // depth of the serial "shallow" top of the tree
  size_t minPrimitivesForParallelBuild; // threshold for the parallel path
  // Cache bounding box computation.
  // Requires more memory, but BVHbuild can be faster.
  bool cacheBBox;
  // Set default value: Taabb = 0.2
  BVHBuildOptions()
      : costTaabb(0.2), minLeafPrimitives(4), maxTreeDepth(256), binSize(64),
        shallowDepth(3), minPrimitivesForParallelBuild(1024 * 128),
        cacheBBox(false) {}
};
///< BVH build statistics.
class BVHBuildStatistics {
public:
  int maxTreeDepth;   // deepest node reached during the build
  int numLeafNodes;
  int numBranchNodes;
  float epsScale;     // scene-scale factor applied to float epsilon
  double buildSecs;
  // All counters start at zero; epsScale starts at 1.
  BVHBuildStatistics()
      : maxTreeDepth(0), numLeafNodes(0), numBranchNodes(0), epsScale(1.0f),
        buildSecs(0.0) {}
};
///< BVH trace option.
class BVHTraceOptions {
public:
  // Hit only for face IDs in indexRange.
  // This feature is good to mimic something like glDrawArrays()
  unsigned int faceIdsRange[2];
  // Default: accept every face ID.
  BVHTraceOptions() {
    faceIdsRange[0] = 0;
    faceIdsRange[1] = 0x7FFFFFFF; // Up to 2G face IDs.
  }
};
// Axis-aligned bounding box. Default-constructed inverted (min = +max float,
// max = -max float) so the first min/max update always takes effect.
class BBox {
public:
  float bmin[3];
  float bmax[3];
  BBox() {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
  }
};
// BVH acceleration structure: build from a triangle mesh, then traverse
// rays against it.
class BVHAccel {
public:
  BVHAccel() : epsScale_(1.0f){};
  ~BVHAccel(){};
  ///< Build BVH for input mesh.
  bool Build(const float *vertices, const unsigned int *faces,
             const unsigned int numFaces, const BVHBuildOptions &options);
  ///< Get statistics of built BVH tree. Valid after Build()
  BVHBuildStatistics GetStatistics() const { return stats_; }
  ///< Dump built BVH to the file.
  bool Dump(const char *filename);
  /// Load BVH binary
  bool Load(const char *filename);
  ///< Traverse into BVH along ray and find closest hit point if found
  bool Traverse(Intersection &isect, const float *vertices,
                const unsigned int *faces, const Ray &ray,
                const BVHTraceOptions &options);
  ///< Multi-hit ray traversal
  ///< Returns `maxIntersections` frontmost intersections
  bool MultiHitTraverse(StackVector<Intersection, 128> &isects,
                        int maxIntersections, const float *vertices,
                        const unsigned int *faces, Ray &ray);
  const std::vector<BVHNode> &GetNodes() const { return nodes_; }
  const std::vector<unsigned int> &GetIndices() const { return indices_; }
  ///< Root AABB of the built tree; inverted (empty) box when no tree exists.
  void BoundingBox(float bmin[3], float bmax[3]) const {
    if (nodes_.empty()) {
      bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
      bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
    } else {
      bmin[0] = nodes_[0].bmin[0];
      bmin[1] = nodes_[0].bmin[1];
      bmin[2] = nodes_[0].bmin[2];
      bmax[0] = nodes_[0].bmax[0];
      bmax[1] = nodes_[0].bmax[1];
      bmax[2] = nodes_[0].bmax[2];
    }
  }
private:
#if NANORT_ENABLE_PARALLEL_BUILD
  // Subtree work item recorded by BuildShallowTree for later (parallel)
  // expansion.
  typedef struct {
    unsigned int leftIdx;
    unsigned int rightIdx;
    unsigned int offset;
  } ShallowNodeInfo;
  // Used only during BVH construction
  std::vector<ShallowNodeInfo> shallowNodeInfos_;
  ///< Builds shallow BVH tree recursively.
  unsigned int BuildShallowTree(std::vector<BVHNode> &outNodes,
                                const float *vertices,
                                const unsigned int *faces, unsigned int leftIdx,
                                unsigned int rightIdx, int depth,
                                int maxShallowDepth, float epsScale);
#endif
  ///< Builds BVH tree recursively.
  size_t BuildTree(BVHBuildStatistics &outStat, std::vector<BVHNode> &outNodes,
                   const float *vertices, const unsigned int *faces,
                   unsigned int leftIdx, unsigned int rightIdx, int depth,
                   float epsScale);
  BVHBuildOptions options_;
  std::vector<BVHNode> nodes_;
  std::vector<unsigned int> indices_; // max 4G triangles.
  BVHBuildStatistics stats_;
  float epsScale_;
  std::vector<BBox> bboxes_; // per-triangle AABB cache (when cacheBBox)
};
#if 0
class BVHBox
{
}
class Scene
{
std::vector<BVHBox> nodes_;
};
#endif
} // namespace nanort
#ifdef NANORT_IMPLEMENTATION
#include <limits>
#include <cassert>
#include <algorithm>
#include <functional>
//
// SAH functions
//
namespace nanort {
// SAH bin counters: (min, max) * xyz * binSize entries, zeroed on
// construction.
struct BinBuffer {
  BinBuffer(int size) {
    binSize = size;
    bin.resize(2 * 3 * size);
    clear();
  }
  // Reset every counter to zero.
  void clear() { std::fill(bin.begin(), bin.end(), 0); }
  std::vector<size_t> bin; // (min, max) * xyz * binsize
  int binSize;
};
inline float CalculateSurfaceArea(const float3 &min, const float3 &max) {
float3 box = max - min;
return 2.0 * (box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}
// Compute the AABB of triangle `index` of the indexed mesh.
inline void GetBoundingBoxOfTriangle(float3 &bmin, float3 &bmax,
                                     const float *vertices,
                                     const unsigned int *faces,
                                     unsigned int index) {
  // Gather the triangle's three vertex positions.
  float3 p[3];
  for (int corner = 0; corner < 3; corner++) {
    const unsigned int vidx = faces[3 * index + corner];
    p[corner] = float3(&vertices[3 * vidx]);
  }
  // Fold the remaining two corners into min/max seeded from the first.
  bmin = p[0];
  bmax = p[0];
  for (int i = 1; i < 3; i++) {
    for (int axis = 0; axis < 3; axis++) {
      bmin[axis] = std::min(bmin[axis], p[i][axis]);
      bmax[axis] = std::max(bmax[axis], p[i][axis]);
    }
  }
}
// Quantize each triangle's AABB into the SAH bins and bump the per-axis
// min-side and max-side counters.
void ContributeBinBuffer(BinBuffer *bins, // [out]
                         const float3 &sceneMin, const float3 &sceneMax,
                         const float *vertices, const unsigned int *faces,
                         unsigned int *indices, unsigned int leftIdx,
                         unsigned int rightIdx, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
  // Bin count as float for the quantization math below.
  float binSize = (float)bins->binSize;
  // Bin count as an integer for index clamping. (The original compared the
  // size_t indices against the *float* binSize, mixing float and integer
  // arithmetic for no benefit.)
  const size_t numBins = (size_t)bins->binSize;
  // Calculate extent
  float3 sceneSize, sceneInvSize;
  sceneSize = sceneMax - sceneMin;
  for (int i = 0; i < 3; ++i) {
    assert(sceneSize[i] >= 0.0);
    if (sceneSize[i] > kEPS) {
      sceneInvSize[i] = binSize / sceneSize[i];
    } else {
      // Degenerate axis: everything lands in bin 0.
      sceneInvSize[i] = 0.0;
    }
  }
  // Clear bin data
  std::fill(bins->bin.begin(), bins->bin.end(), 0);
  size_t idxBMin[3];
  size_t idxBMax[3];
  for (size_t i = leftIdx; i < rightIdx; i++) {
    //
    // Quantize the position into [0, BIN_SIZE)
    //
    // q[i] = (int)(p[i] - scene_bmin) / scene_size
    //
    float3 bmin;
    float3 bmax;
    GetBoundingBoxOfTriangle(bmin, bmax, vertices, faces, indices[i]);
    float3 quantizedBMin = (bmin - sceneMin) * sceneInvSize;
    float3 quantizedBMax = (bmax - sceneMin) * sceneInvSize;
    // idx is now in [0, BIN_SIZE)
    for (size_t j = 0; j < 3; ++j) {
      int q0 = (int)quantizedBMin[j];
      if (q0 < 0)
        q0 = 0;
      int q1 = (int)quantizedBMax[j];
      if (q1 < 0)
        q1 = 0;
      idxBMin[j] = (unsigned int)q0;
      idxBMax[j] = (unsigned int)q1;
      // Clamp to the last bin (handles points exactly on the scene max).
      if (idxBMin[j] >= numBins)
        idxBMin[j] = numBins - 1;
      if (idxBMax[j] >= numBins)
        idxBMax[j] = numBins - 1;
      assert(idxBMin[j] < numBins);
      assert(idxBMax[j] < numBins);
      // Increment bin counter
      bins->bin[0 * (bins->binSize * 3) + j * bins->binSize + idxBMin[j]] += 1;
      bins->bin[1 * (bins->binSize * 3) + j * bins->binSize + idxBMax[j]] += 1;
    }
  }
}
// Surface Area Heuristic cost of a candidate split: two AABB tests plus the
// expected triangle-test cost of each child, weighted by relative area.
inline float SAH(size_t ns1, float leftArea, size_t ns2, float rightArea,
                 float invS, float Taabb, float Ttri) {
  const float traversalCost = 2.0f * Taabb;
  const float leftCost = (leftArea * invS) * (float)(ns1)*Ttri;
  const float rightCost = (rightArea * invS) * (float)(ns2)*Ttri;
  return traversalCost + leftCost + rightCost;
}
// Scan the filled bins and pick, per axis, the split plane with the lowest
// SAH cost; then select the cheapest axis overall.
// Outputs: cutPos[3] (candidate plane per axis) and minCostAxis.
bool FindCutFromBinBuffer(float *cutPos, // [out] xyz
                          int &minCostAxis, // [out]
                          const BinBuffer *bins, const float3 &bmin,
                          const float3 &bmax, size_t numTriangles,
                          float costTaabb, // should be in [0.0, 1.0]
                          float epsScale) {
  const float eps = std::numeric_limits<float>::epsilon() * epsScale;
  size_t left, right;
  float3 bsize, bstep;
  float3 bminLeft, bmaxLeft;
  float3 bminRight, bmaxRight;
  float saLeft, saRight, saTotal;
  float pos;
  float minCost[3];
  // Triangle-test cost is the complement of the AABB-test cost.
  float costTtri = 1.0 - costTaabb;
  minCostAxis = 0;
  bsize = bmax - bmin;
  bstep = bsize * (1.0 / bins->binSize);
  saTotal = CalculateSurfaceArea(bmin, bmax);
  // Guard against a degenerate (zero-area) parent box.
  float invSaTotal = 0.0;
  if (saTotal > eps) {
    invSaTotal = 1.0 / saTotal;
  }
  for (int j = 0; j < 3; ++j) {
    //
    // Compute SAH cost for right side of each cell of the bbox.
    // Exclude both extreme side of the bbox.
    //
    //  i:      0    1    2    3
    //     +----+----+----+----+----+
    //     |    |    |    |    |    |
    //     +----+----+----+----+----+
    //
    float minCostPos = bmin[j] + 0.5 * bstep[j];
    minCost[j] = std::numeric_limits<float>::max();
    // Running primitive counts: `left` grows from the min-side bins,
    // `right` shrinks by the max-side bins, so both are exact per plane.
    left = 0;
    right = numTriangles;
    bminLeft = bminRight = bmin;
    bmaxLeft = bmaxRight = bmax;
    for (int i = 0; i < bins->binSize - 1; ++i) {
      left += bins->bin[0 * (3 * bins->binSize) + j * bins->binSize + i];
      right -= bins->bin[1 * (3 * bins->binSize) + j * bins->binSize + i];
      assert(left <= numTriangles);
      assert(right <= numTriangles);
      //
      // Split pos bmin + (i + 1) * (bsize / BIN_SIZE)
      // +1 for i since we want a position on right side of the cell.
      //
      // NOTE(review): the code actually uses (i + 0.5), i.e. the cell
      // center, not (i + 1) as the comment above says -- confirm intent.
      pos = bmin[j] + (i + 0.5) * bstep[j];
      bmaxLeft[j] = pos;
      bminRight[j] = pos;
      saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
      saRight = CalculateSurfaceArea(bminRight, bmaxRight);
      float cost =
          SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
      if (cost < minCost[j]) {
        //
        // Update the min cost
        //
        minCost[j] = cost;
        minCostPos = pos;
        // minCostAxis = j;
      }
    }
    cutPos[j] = minCostPos;
  }
  // cutAxis = minCostAxis;
  // cutPos = minCostPos;
  // Find min cost axis
  float cost = minCost[0];
  minCostAxis = 0;
  if (cost > minCost[1]) {
    minCostAxis = 1;
    cost = minCost[1];
  }
  if (cost > minCost[2]) {
    minCostAxis = 2;
    cost = minCost[2];
  }
  return true;
}
class SAHPred : public std::unary_function<unsigned int, bool> {
public:
SAHPred(int axis, float pos, const float *vertices, const unsigned int *faces)
: axis_(axis), pos_(pos), vertices_(vertices), faces_(faces) {}
bool operator()(unsigned int i) const {
int axis = axis_;
float pos = pos_;
unsigned int i0 = faces_[3 * i + 0];
unsigned int i1 = faces_[3 * i + 1];
unsigned int i2 = faces_[3 * i + 2];
float3 p0(&vertices_[3 * i0]);
float3 p1(&vertices_[3 * i1]);
float3 p2(&vertices_[3 * i2]);
float center = p0[axis] + p1[axis] + p2[axis];
return (center < pos * 3.0f);
}
private:
int axis_;
float pos_;
const float *vertices_;
const unsigned int *faces_;
};
#ifdef _OPENMP
// OpenMP variant of ComputeBoundingBox: each thread accumulates a private
// bound over its slice of [leftIndex, rightIndex), then merges it into the
// shared result inside a critical section.
void ComputeBoundingBoxOMP(float3 &bmin, float3 &bmax, const float *vertices,
                           const unsigned int *faces, unsigned int *indices,
                           unsigned int leftIndex, unsigned int rightIndex,
                           float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
  // Seed the shared bound with the first face's first vertex (+/- epsilon).
  long long i = leftIndex;
  long long idx = indices[i];
  long long n = rightIndex - leftIndex;
  bmin[0] = vertices[3 * faces[3 * idx + 0] + 0] - kEPS;
  bmin[1] = vertices[3 * faces[3 * idx + 0] + 1] - kEPS;
  bmin[2] = vertices[3 * faces[3 * idx + 0] + 2] - kEPS;
  bmax[0] = vertices[3 * faces[3 * idx + 0] + 0] + kEPS;
  bmax[1] = vertices[3 * faces[3 * idx + 0] + 1] + kEPS;
  bmax[2] = vertices[3 * faces[3 * idx + 0] + 2] + kEPS;
  float local_bmin[3] = {bmin[0], bmin[1], bmin[2]};
  float local_bmax[3] = {bmax[0], bmax[1], bmax[2]};
  // Only parallelize when the range is large enough to pay off.
#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp for
    for (i = leftIndex; i < rightIndex; i++) { // for each faces
      size_t idx = indices[i];
      for (int j = 0; j < 3; j++) { // for each face vertex
        size_t fid = faces[3 * idx + j];
        for (int k = 0; k < 3; k++) { // xyz
          float minval = vertices[3 * fid + k] - kEPS;
          float maxval = vertices[3 * fid + k] + kEPS;
          if (local_bmin[k] > minval)
            local_bmin[k] = minval;
          if (local_bmax[k] < maxval)
            local_bmax[k] = maxval;
        }
      }
    }
    // Merge this thread's bound into the shared result.
    // (The original repeated each comparison in a redundant nested if;
    // one check per component is sufficient.)
#pragma omp critical
    {
      for (int k = 0; k < 3; k++) {
        if (local_bmin[k] < bmin[k])
          bmin[k] = local_bmin[k];
        if (local_bmax[k] > bmax[k])
          bmax[k] = local_bmax[k];
      }
    }
  }
}
#endif
// Serial AABB over the faces referenced by indices[leftIndex, rightIndex),
// expanded by a scene-scaled epsilon on every side.
void ComputeBoundingBox(float3 &bmin, float3 &bmax, const float *vertices,
                        const unsigned int *faces, unsigned int *indices,
                        unsigned int leftIndex, unsigned int rightIndex,
                        float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
  // Seed with the first vertex of the first face in the range.
  const long long firstFace = indices[leftIndex];
  for (int k = 0; k < 3; k++) {
    const float seed = vertices[3 * faces[3 * firstFace + 0] + k];
    bmin[k] = seed - kEPS;
    bmax[k] = seed + kEPS;
  }
  // Grow the bound over every vertex of every face in the range.
  for (long long i = leftIndex; i < rightIndex; i++) {
    const size_t face = indices[i];
    for (int j = 0; j < 3; j++) {
      const size_t vtx = faces[3 * face + j];
      for (int k = 0; k < 3; k++) {
        const float lo = vertices[3 * vtx + k] - kEPS;
        const float hi = vertices[3 * vtx + k] + kEPS;
        if (lo < bmin[k])
          bmin[k] = lo;
        if (hi > bmax[k])
          bmax[k] = hi;
      }
    }
  }
}
// AABB over indices[leftIndex, rightIndex) using the precomputed per-triangle
// bounding boxes, expanded by a scene-scaled epsilon on every side.
void GetBoundingBox(float3 &bmin, float3 &bmax, std::vector<BBox> &bboxes,
                    unsigned int *indices, unsigned int leftIndex,
                    unsigned int rightIndex, float epsScale) {
  const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
  // Seed with the first referenced cached box.
  const long long firstIdx = indices[leftIndex];
  for (int k = 0; k < 3; k++) {
    bmin[k] = bboxes[firstIdx].bmin[k] - kEPS;
    bmax[k] = bboxes[firstIdx].bmax[k] + kEPS;
  }
  // Grow over every cached box in the range.
  for (long long i = leftIndex; i < rightIndex; i++) {
    const size_t idx = indices[i];
    for (int k = 0; k < 3; k++) {
      const float lo = bboxes[idx].bmin[k] - kEPS;
      const float hi = bboxes[idx].bmax[k] + kEPS;
      if (lo < bmin[k])
        bmin[k] = lo;
      if (hi > bmax[k])
        bmax[k] = hi;
    }
  }
}
//
// --
//
#if NANORT_ENABLE_PARALLEL_BUILD
// Recursively builds the top of the BVH down to `maxShallowDepth`; deeper
// subtrees are recorded in shallowNodeInfos_ (as dummy nodes) for later
// parallel expansion. Returns the node's index in outNodes, and permutes
// indices_ in place via std::partition.
unsigned int BVHAccel::BuildShallowTree(std::vector<BVHNode> &outNodes,
                                        const float *vertices,
                                        const unsigned int *faces,
                                        unsigned int leftIdx,
                                        unsigned int rightIdx, int depth,
                                        int maxShallowDepth, float epsScale) {
  assert(leftIdx <= rightIdx);
  unsigned int offset = outNodes.size();
  if (stats_.maxTreeDepth < depth) {
    stats_.maxTreeDepth = depth;
  }
  float3 bmin, bmax;
  ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
                     rightIdx, epsScale);
  long long n = rightIdx - leftIdx;
  if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
    // Create leaf node.
    // NOTE(review): leaf.axis is left uninitialized here -- confirm that
    // traversal never reads axis on leaf nodes.
    BVHNode leaf;
    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];
    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];
    assert(leftIdx < std::numeric_limits<unsigned int>::max());
    leaf.flag = 1; // leaf
    leaf.data[0] = n;
    leaf.data[1] = (unsigned int)leftIdx;
    outNodes.push_back(leaf); // atomic update
    stats_.numLeafNodes++;
    return offset;
  }
  //
  // Create branch node.
  //
  if (depth >= maxShallowDepth) {
    // Delay to build tree
    ShallowNodeInfo info;
    info.leftIdx = leftIdx;
    info.rightIdx = rightIdx;
    info.offset = offset;
    shallowNodeInfos_.push_back(info);
    // Add dummy node.
    // Its bounds/children are presumably patched after the parallel phase
    // fills in the deferred subtree -- confirm against the caller.
    BVHNode node;
    node.axis = -1;
    node.flag = -1;
    outNodes.push_back(node);
    return offset;
  } else {
    //
    // Compute SAH and find best split axis and position
    //
    int minCutAxis = 0;
    float cutPos[3] = {0.0, 0.0, 0.0};
    BinBuffer bins(options_.binSize);
    ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
                        leftIdx, rightIdx, epsScale);
    FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
                         options_.costTaabb, epsScale);
    // Try all 3 axis until good cut position avaiable.
    // NOTE(review): the loop bound is `axisTry < 1`, so only minCutAxis is
    // ever tried despite the comment (and the %3 rotation) -- confirm
    // whether `< 3` was intended.
    unsigned int midIdx;
    int cutAxis = minCutAxis;
    for (int axisTry = 0; axisTry < 1; axisTry++) {
      unsigned int *begin = &indices_[leftIdx];
      unsigned int *end = &indices_[rightIdx - 1] + 1; // mimics end() iterator.
      unsigned int *mid = 0;
      // try minCutAxis first.
      cutAxis = (minCutAxis + axisTry) % 3;
      //
      // Split at (cutAxis, cutPos)
      // indices_ will be modified.
      //
      mid = std::partition(begin, end,
                           SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));
      midIdx = leftIdx + (mid - begin);
      if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
        // Can't split well.
        // Switch to object median(which may create unoptimized tree, but
        // stable)
        midIdx = leftIdx + (n >> 1);
        // Try another axis if there's axis to try.
      } else {
        // Found good cut. exit loop.
        break;
      }
    }
    BVHNode node;
    node.axis = cutAxis;
    node.flag = 0; // 0 = branch
    outNodes.push_back(node);
    unsigned int leftChildIndex = 0;
    unsigned int rightChildIndex = 0;
    // Recurse; children fill outNodes after this node, and their indices
    // plus this node's bounds are written back below.
    leftChildIndex =
        BuildShallowTree(outNodes, vertices, faces, leftIdx, midIdx, depth + 1,
                         maxShallowDepth, epsScale);
    rightChildIndex =
        BuildShallowTree(outNodes, vertices, faces, midIdx, rightIdx, depth + 1,
                         maxShallowDepth, epsScale);
    if ((leftChildIndex != (unsigned int)(-1)) &&
        (rightChildIndex != (unsigned int)(-1))) {
      outNodes[offset].data[0] = leftChildIndex;
      outNodes[offset].data[1] = rightChildIndex;
      outNodes[offset].bmin[0] = bmin[0];
      outNodes[offset].bmin[1] = bmin[1];
      outNodes[offset].bmin[2] = bmin[2];
      outNodes[offset].bmax[0] = bmax[0];
      outNodes[offset].bmax[1] = bmax[1];
      outNodes[offset].bmax[2] = bmax[2];
    } else {
      // A child failed to build on one side only: internal inconsistency.
      if ((leftChildIndex == (unsigned int)(-1)) &&
          (rightChildIndex != (unsigned int)(-1))) {
        fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
        exit(-1);
      } else if ((leftChildIndex != (unsigned int)(-1)) &&
                 (rightChildIndex == (unsigned int)(-1))) {
        fprintf(stderr, "??? : %u, %u\n", leftChildIndex, rightChildIndex);
        exit(-1);
      }
    }
  }
  stats_.numBranchNodes++;
  return offset;
}
#endif
// Recursively builds a BVH subtree over indices_[leftIdx, rightIdx).
// Returns the node's index in outNodes; permutes indices_ in place via
// std::partition. Uses the cached per-triangle bboxes_ when available.
size_t BVHAccel::BuildTree(BVHBuildStatistics &outStat,
                           std::vector<BVHNode> &outNodes,
                           const float *vertices, const unsigned int *faces,
                           unsigned int leftIdx, unsigned int rightIdx,
                           int depth, float epsScale) {
  assert(leftIdx <= rightIdx);
  size_t offset = outNodes.size();
  if (outStat.maxTreeDepth < depth) {
    outStat.maxTreeDepth = depth;
  }
  float3 bmin, bmax;
  if (!bboxes_.empty()) {
    GetBoundingBox(bmin, bmax, bboxes_, &indices_.at(0), leftIdx, rightIdx,
                   epsScale);
  } else {
    ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), leftIdx,
                       rightIdx, epsScale);
  }
  long long n = rightIdx - leftIdx;
  if ((n < options_.minLeafPrimitives) || (depth >= options_.maxTreeDepth)) {
    // Create leaf node.
    // NOTE(review): leaf.axis is left uninitialized here -- confirm that
    // traversal never reads axis on leaf nodes.
    BVHNode leaf;
    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];
    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];
    assert(leftIdx < std::numeric_limits<unsigned int>::max());
    leaf.flag = 1; // leaf
    leaf.data[0] = n;
    leaf.data[1] = (unsigned int)leftIdx;
    outNodes.push_back(leaf); // atomic update
    outStat.numLeafNodes++;
    return offset;
  }
  //
  // Create branch node.
  //
  //
  // Compute SAH and find best split axis and position
  //
  int minCutAxis = 0;
  float cutPos[3] = {0.0, 0.0, 0.0};
  BinBuffer bins(options_.binSize);
  ContributeBinBuffer(&bins, bmin, bmax, vertices, faces, &indices_.at(0),
                      leftIdx, rightIdx, epsScale);
  FindCutFromBinBuffer(cutPos, minCutAxis, &bins, bmin, bmax, n,
                       options_.costTaabb, epsScale);
  // Try all 3 axis until good cut position avaiable.
  // NOTE(review): the loop bound is `axisTry < 1`, so only minCutAxis is
  // ever tried despite the comment (and the %3 rotation) -- confirm whether
  // `< 3` was intended.
  unsigned int midIdx;
  int cutAxis = minCutAxis;
  for (int axisTry = 0; axisTry < 1; axisTry++) {
    unsigned int *begin = &indices_[leftIdx];
    unsigned int *end = &indices_[rightIdx - 1] + 1; // mimics end() iterator.
    unsigned int *mid = 0;
    // try minCutAxis first.
    cutAxis = (minCutAxis + axisTry) % 3;
    //
    // Split at (cutAxis, cutPos)
    // indices_ will be modified.
    //
    mid = std::partition(begin, end,
                         SAHPred(cutAxis, cutPos[cutAxis], vertices, faces));
    midIdx = leftIdx + (mid - begin);
    if ((midIdx == leftIdx) || (midIdx == rightIdx)) {
      // Can't split well.
      // Switch to object median(which may create unoptimized tree, but
      // stable)
      midIdx = leftIdx + (n >> 1);
      // Try another axis if there's axis to try.
    } else {
      // Found good cut. exit loop.
      break;
    }
  }
  BVHNode node;
  node.axis = cutAxis;
  node.flag = 0; // 0 = branch
  outNodes.push_back(node); // atomic update
  unsigned int leftChildIndex = 0;
  unsigned int rightChildIndex = 0;
  // Recurse, then patch this node's child indices and bounds in place.
  leftChildIndex = BuildTree(outStat, outNodes, vertices, faces, leftIdx,
                             midIdx, depth + 1, epsScale);
  rightChildIndex = BuildTree(outStat, outNodes, vertices, faces, midIdx,
                              rightIdx, depth + 1, epsScale);
  {
    outNodes[offset].data[0] = leftChildIndex;
    outNodes[offset].data[1] = rightChildIndex;
    outNodes[offset].bmin[0] = bmin[0];
    outNodes[offset].bmin[1] = bmin[1];
    outNodes[offset].bmin[2] = bmin[2];
    outNodes[offset].bmax[0] = bmax[0];
    outNodes[offset].bmax[1] = bmax[1];
    outNodes[offset].bmax[2] = bmax[2];
  }
  outStat.numBranchNodes++;
  return offset;
}
// Build a BVH over `numFaces` triangles.
// Steps: (1) initialize the face-index permutation array,
//        (2) compute the scene bounding box (optionally caching per-face
//            bboxes) to derive `epsScale` (scene scale for epsilon tests),
//        (3) build the tree — in parallel for large inputs when OpenMP and
//            NANORT_ENABLE_PARALLEL_BUILD are enabled.
// Always returns true.
bool BVHAccel::Build(const float *vertices, const unsigned int *faces,
                     unsigned int numFaces, const BVHBuildOptions &options) {
  options_ = options;
  stats_ = BVHBuildStatistics();
  assert(options_.binSize > 1);
  size_t n = numFaces;
  //
  // 1. Create triangle indices(this will be permutated in BuildTree)
  //
  indices_.resize(n);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long i = 0; i < (long long)n; i++) {
    indices_[i] = i;
  }
  //
  // 2. Compute bounding box to find scene scale.
  //
  float epsScale = 1.0f;
  float3 bmin, bmax;
  if (options.cacheBBox) {
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<float>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<float>::max();
    bboxes_.resize(n);
    for (size_t i = 0; i < n; i++) { // for each faces
      size_t idx = indices_[i];
      // NOTE(review): bbox.bmin/bbox.bmax are read below before any write in
      // this loop — presumably BBox's constructor initializes them to
      // +/-FLT_MAX; confirm in the BBox declaration.
      BBox bbox;
      for (int j = 0; j < 3; j++) { // for each face vertex
        size_t fid = faces[3 * idx + j];
        for (int k = 0; k < 3; k++) { // xyz
          float minval = vertices[3 * fid + k];
          float maxval = vertices[3 * fid + k];
          if (bbox.bmin[k] > minval) {
            bbox.bmin[k] = minval;
          }
          if (bbox.bmax[k] < maxval) {
            bbox.bmax[k] = maxval;
          }
        }
      }
      // Cache the per-face bbox, then fold it into the scene bounds.
      bboxes_[idx] = bbox;
      for (int k = 0; k < 3; k++) { // xyz
        if (bmin[k] > bbox.bmin[k]) {
          bmin[k] = bbox.bmin[k];
        }
        if (bmax[k] < bbox.bmax[k]) {
          bmax[k] = bbox.bmax[k];
        }
      }
    }
  } else {
#ifdef _OPENMP
    ComputeBoundingBoxOMP(bmin, bmax, vertices, faces, &indices_.at(0), 0, n,
                          epsScale);
#else
    ComputeBoundingBox(bmin, bmax, vertices, faces, &indices_.at(0), 0, n,
                       epsScale);
#endif
  }
  // Find max extent of the scene bbox; used as the epsilon scale for
  // intersection tolerance tests.
  float3 bsize = bmax - bmin;
  epsScale = std::abs(bsize[0]);
  if (epsScale < std::abs(bsize[1])) {
    epsScale = std::abs(bsize[1]);
  }
  if (epsScale < std::abs(bsize[2])) {
    epsScale = std::abs(bsize[2]);
  }
  //
  // 3. Build tree
  //
#ifdef _OPENMP
#if NANORT_ENABLE_PARALLEL_BUILD
  // Do parallel build for enoughly large dataset.
  if (n > options.minPrimitivesForParallelBuild) {
    // First build a shallow tree serially; its leaves become independent
    // subtree roots that can be built in parallel.
    BuildShallowTree(nodes_, vertices, faces, 0, n, /* root depth */ 0,
                     options.shallowDepth, epsScale); // [0, n)
    assert(shallowNodeInfos_.size() > 0);
    // Build deeper tree in parallel
    std::vector<std::vector<BVHNode> > local_nodes(shallowNodeInfos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallowNodeInfos_.size());
#pragma omp parallel for
    for (int i = 0; i < (int)shallowNodeInfos_.size(); i++) {
      unsigned int leftIdx = shallowNodeInfos_[i].leftIdx;
      unsigned int rightIdx = shallowNodeInfos_[i].rightIdx;
      BuildTree(local_stats[i], local_nodes[i], vertices, faces, leftIdx,
                rightIdx, options.shallowDepth, epsScale);
    }
    // Join local nodes
    for (int i = 0; i < (int)local_nodes.size(); i++) {
      assert(!local_nodes[i].empty());
      size_t offset = nodes_.size();
      // Add offset to child index(for branch node).
      // `offset - 1` because the local root is spliced into the shallow
      // tree's slot rather than appended, so children shift by one.
      for (size_t j = 0; j < local_nodes[i].size(); j++) {
        if (local_nodes[i][j].flag == 0) { // branch
          local_nodes[i][j].data[0] += offset - 1;
          local_nodes[i][j].data[1] += offset - 1;
        }
      }
      // replace
      nodes_[shallowNodeInfos_[i].offset] = local_nodes[i][0];
      // Skip root element of the local node.
      nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
                    local_nodes[i].end());
    }
    // Join statistics
    for (int i = 0; i < (int)local_nodes.size(); i++) {
      stats_.maxTreeDepth =
          std::max(stats_.maxTreeDepth, local_stats[i].maxTreeDepth);
      stats_.numLeafNodes += local_stats[i].numLeafNodes;
      stats_.numBranchNodes += local_stats[i].numBranchNodes;
    }
  } else {
    // Small dataset: serial build is cheaper than thread setup.
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale); // [0, n)
  }
#else // !NANORT_ENABLE_PARALLEL_BUILD
  {
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale); // [0, n)
  }
#endif
#else // !_OPENMP
  {
    BuildTree(stats_, nodes_, vertices, faces, 0, n, /* root depth */ 0,
              epsScale); // [0, n)
  }
#endif
  stats_.epsScale = epsScale;
  epsScale_ = epsScale;
  return true;
}
// Serialize the BVH to a binary file.
// Layout: [u64 numNodes][BVHNode x numNodes][u64 numIndices][u32 x numIndices]
//
// filename : path of the file to (over)write.
// Returns false on any I/O failure. The original version checked fwrite()
// results only with assert(), which is compiled out under NDEBUG, and
// ignored the fclose() result (fclose flushes; its failure means the file
// on disk is incomplete).
bool BVHAccel::Dump(const char *filename) {
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
    return false;
  }
  assert(nodes_.size() > 0);
  unsigned long long numNodes = nodes_.size();
  unsigned long long numIndices = indices_.size();
  bool ok = true;
  ok = ok && (fwrite(&numNodes, sizeof(unsigned long long), 1, fp) == 1);
  ok = ok && (fwrite(&nodes_.at(0), sizeof(BVHNode), numNodes, fp) == numNodes);
  ok = ok && (fwrite(&numIndices, sizeof(unsigned long long), 1, fp) == 1);
  ok = ok &&
       (fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp) ==
        numIndices);
  // fclose() flushes buffered data; treat its failure as a write failure.
  if (fclose(fp) != 0) {
    ok = false;
  }
  if (!ok) {
    fprintf(stderr, "[BVHAccel] I/O error while writing: %s\n", filename);
  }
  return ok;
}
// Deserialize a BVH previously written by Dump().
//
// filename : path of the file to read.
// Returns false on open failure, short reads, or an empty node table.
// The original relied on assert() (compiled out under NDEBUG) to detect
// truncated files, and `indices_.at(0)` would throw for numIndices == 0.
// The file handle is now always closed before returning.
bool BVHAccel::Load(const char *filename) {
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    fprintf(stderr, "Cannot open file: %s\n", filename);
    return false;
  }
  unsigned long long numNodes = 0;
  unsigned long long numIndices = 0;
  bool ok = (fread(&numNodes, sizeof(unsigned long long), 1, fp) == 1) &&
            (numNodes > 0);
  if (ok) {
    nodes_.resize(numNodes);
    ok = (fread(&nodes_.at(0), sizeof(BVHNode), numNodes, fp) == numNodes);
  }
  ok = ok && (fread(&numIndices, sizeof(unsigned long long), 1, fp) == 1);
  if (ok) {
    indices_.resize(numIndices);
    if (numIndices > 0) {
      ok = (fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp) ==
            numIndices);
    }
  }
  fclose(fp);
  if (!ok) {
    fprintf(stderr, "[BVHAccel] Invalid or truncated BVH file: %s\n",
            filename);
  }
  return ok;
}
namespace {
const int kMaxStackDepth = 512;
// Ray/AABB slab test. Picks the near/far plane per axis from the ray
// direction sign so each per-axis interval is already ordered, then
// intersects the three intervals. A grazing hit (entry == exit) counts.
// Returns true and writes the entry/exit distances when the box is hit
// within (0, maxT].
inline bool IntersectRayAABB(float &tminOut, // [out]
                             float &tmaxOut, // [out]
                             float maxT, float bmin[3], float bmax[3],
                             float3 rayOrg, float3 rayInvDir,
                             int rayDirSign[3]) {
  // Near ("entry") planes, chosen by direction sign.
  const float near_x = rayDirSign[0] ? bmax[0] : bmin[0];
  const float near_y = rayDirSign[1] ? bmax[1] : bmin[1];
  const float near_z = rayDirSign[2] ? bmax[2] : bmin[2];
  // Far ("exit") planes.
  const float far_x = rayDirSign[0] ? bmin[0] : bmax[0];
  const float far_y = rayDirSign[1] ? bmin[1] : bmax[1];
  const float far_z = rayDirSign[2] ? bmin[2] : bmax[2];
  // Per-axis entry/exit distances along the ray.
  const float t_enter_x = (near_x - rayOrg[0]) * rayInvDir[0];
  const float t_exit_x = (far_x - rayOrg[0]) * rayInvDir[0];
  const float t_enter_y = (near_y - rayOrg[1]) * rayInvDir[1];
  const float t_exit_y = (far_y - rayOrg[1]) * rayInvDir[1];
  const float t_enter_z = (near_z - rayOrg[2]) * rayInvDir[2];
  const float t_exit_z = (far_z - rayOrg[2]) * rayInvDir[2];
  // Intersect the three [enter, exit] intervals.
  float t_enter = (t_enter_x > t_enter_y) ? t_enter_x : t_enter_y;
  if (t_enter_z > t_enter) {
    t_enter = t_enter_z;
  }
  float t_exit = (t_exit_x < t_exit_y) ? t_exit_x : t_exit_y;
  if (t_exit_z < t_exit) {
    t_exit = t_exit_z;
  }
  // Hit includes the (t_enter == t_exit) edge case (hitting a 2D plane).
  if ((t_exit > 0.0) && (t_enter <= t_exit) && (t_enter <= maxT)) {
    tminOut = t_enter;
    tmaxOut = t_exit;
    return true;
  }
  return false; // no hit
}
inline bool TriangleIsect(float &tInOut, float &uOut, float &vOut,
const float3 &v0, const float3 &v1, const float3 &v2,
const float3 &rayOrg, const float3 &rayDir,
float epsScale) {
const float kEPS = std::numeric_limits<float>::epsilon() * epsScale;
float3 p0(v0[0], v0[1], v0[2]);
float3 p1(v1[0], v1[1], v1[2]);
float3 p2(v2[0], v2[1], v2[2]);
float3 e1, e2;
float3 p, s, q;
e1 = p1 - p0;
e2 = p2 - p0;
p = vcross(rayDir, e2);
float invDet;
float det = vdot(e1, p);
if (std::abs(det) < kEPS) { // no-cull
return false;
}
invDet = 1.0 / det;
s = rayOrg - p0;
q = vcross(s, e1);
float u = vdot(s, p) * invDet;
float v = vdot(q, rayDir) * invDet;
float t = vdot(e2, q) * invDet;
if (u < 0.0 || u > 1.0)
return false;
if (v < 0.0 || u + v > 1.0)
return false;
if (t < 0.0 || t > tInOut)
return false;
tInOut = t;
uOut = u;
vOut = v;
return true;
}
// Intersect the ray against every triangle referenced by this leaf node,
// keeping the nearest hit in `isect`. Faces whose index falls outside
// traceOptions.faceIdsRange are skipped. Returns true if any triangle in
// this leaf produced a new nearest hit.
inline bool TestLeafNode(Intersection &isect, // [inout]
                         const BVHNode &node,
                         const std::vector<unsigned int> &indices,
                         const float *vertices, const unsigned int *faces,
                         const Ray &ray, float epsScale, const BVHTraceOptions& traceOptions) {
  const unsigned int numTriangles = node.data[0];
  const unsigned int offset = node.data[1];
  float closestT = isect.t; // current nearest hit distance
  float3 org;
  org[0] = ray.org[0];
  org[1] = ray.org[1];
  org[2] = ray.org[2];
  float3 dir;
  dir[0] = ray.dir[0];
  dir[1] = ray.dir[1];
  dir[2] = ray.dir[2];
  bool anyHit = false;
  for (unsigned int tri = 0; tri < numTriangles; tri++) {
    const int faceIdx = indices[tri + offset];
    // Caller-provided face-id filter.
    if ((faceIdx < traceOptions.faceIdsRange[0]) ||
        (faceIdx >= traceOptions.faceIdsRange[1])) {
      continue;
    }
    const int i0 = faces[3 * faceIdx + 0];
    const int i1 = faces[3 * faceIdx + 1];
    const int i2 = faces[3 * faceIdx + 2];
    // Gather the three triangle vertices.
    float3 a, b, c;
    for (int k = 0; k < 3; k++) {
      a[k] = vertices[3 * i0 + k];
      b[k] = vertices[3 * i1 + k];
      c[k] = vertices[3 * i2 + k];
    }
    float u, v;
    if (TriangleIsect(closestT, u, v, a, b, c, org, dir, epsScale)) {
      // Record the new nearest hit.
      isect.t = closestT;
      isect.u = u;
      isect.v = v;
      isect.faceID = faceIdx;
      anyHit = true;
    }
  }
  return anyHit;
}
// Intersect the ray with every triangle in this leaf, maintaining up to
// `maxIntersections` nearest hits in the priority queue `isects`
// (top() is the furthest of the hits currently kept).
// Returns true if the queue was modified.
inline bool MultiHitTestLeafNode(IsectVector &isects, // [inout]
                                 int maxIntersections, const BVHNode &node,
                                 const std::vector<unsigned int> &indices,
                                 const float *vertices,
                                 const unsigned int *faces, const Ray &ray,
                                 float epsScale) {
  bool hit = false;
  unsigned int numTriangles = node.data[0];
  unsigned int offset = node.data[1];
  // Cull distance: while the queue is not yet full every hit is accepted;
  // once full, only hits nearer than the furthest kept one matter.
  float t = std::numeric_limits<float>::max();
  if (isects.size() >= (size_t)maxIntersections) {
    t = isects.top().t; // current furthest hit distance
  }
  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];
  float3 rayDir;
  rayDir[0] = ray.dir[0];
  rayDir[1] = ray.dir[1];
  rayDir[2] = ray.dir[2];
  for (unsigned int i = 0; i < numTriangles; i++) {
    int faceIdx = indices[i + offset];
    int f0 = faces[3 * faceIdx + 0];
    int f1 = faces[3 * faceIdx + 1];
    int f2 = faces[3 * faceIdx + 2];
    float3 v0, v1, v2;
    v0[0] = vertices[3 * f0 + 0];
    v0[1] = vertices[3 * f0 + 1];
    v0[2] = vertices[3 * f0 + 2];
    v1[0] = vertices[3 * f1 + 0];
    v1[1] = vertices[3 * f1 + 1];
    v1[2] = vertices[3 * f1 + 2];
    v2[0] = vertices[3 * f2 + 0];
    v2[1] = vertices[3 * f2 + 1];
    v2[2] = vertices[3 * f2 + 2];
    float u, v;
    // TriangleIsect only succeeds for hits nearer than `t`.
    if (TriangleIsect(t, u, v, v0, v1, v2, rayOrg, rayDir, epsScale)) {
      // Update isect state
      if (isects.size() < (size_t)maxIntersections) {
        // Queue not full yet: accept unconditionally.
        Intersection isect;
        isect.t = t;
        isect.u = u;
        isect.v = v;
        isect.faceID = faceIdx;
        isects.push(isect);
        // Update furthest distance to far.
        // (reset so the next triangle is tested against "no limit" again)
        t = std::numeric_limits<float>::max();
        hit = true;
      } else {
        if (t < isects.top().t) {
          // delete furthest intersection and add new intersection.
          isects.pop();
          Intersection isect;
          isect.t = t;
          isect.u = u;
          isect.v = v;
          isect.faceID = faceIdx;
          isects.push(isect);
          // Update furthest hit distance
          t = isects.top().t;
          hit = true;
        }
      }
    }
  }
  return hit;
}
} // namespace
// Find the nearest triangle hit along `ray` using iterative (stack-based)
// BVH traversal.
//
// isect   : [out] nearest hit; isect.t stays FLT_MAX when nothing is hit.
// options : face-id range filter forwarded to the leaf test.
// Returns true iff some triangle was hit.
bool BVHAccel::Traverse(Intersection &isect, const float *vertices,
                        const unsigned int *faces, const Ray &ray, const BVHTraceOptions& options) {
  float hitT = std::numeric_limits<float>::max(); // far = no hit.
  int nodeStackIndex = 0;
  // Size the stack from the shared constant so the overflow assert below
  // actually guards this buffer. The original used a bare `512` literal and
  // asserted `nodeStackIndex < kMaxStackDepth` only AFTER the loop, where
  // the index is always -1 and the check can never fire.
  int nodeStack[kMaxStackDepth];
  nodeStack[0] = 0;
  // Init isect info as no hit.
  isect.t = hitT;
  isect.u = 0.0;
  isect.v = 0.0;
  isect.faceID = -1;
  int dirSign[3];
  dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0;
  dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0;
  dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0;
  // @fixme { Check edge case; i.e., 1/0 }
  float3 rayInvDir;
  rayInvDir[0] = 1.0 / ray.dir[0];
  rayInvDir[1] = 1.0 / ray.dir[1];
  rayInvDir[2] = 1.0 / ray.dir[2];
  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];
  float minT, maxT;
  while (nodeStackIndex >= 0) {
    int index = nodeStack[nodeStackIndex];
    BVHNode &node = nodes_[index];
    nodeStackIndex--;
    bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg,
                                rayInvDir, dirSign);
    if (node.flag == 0) { // branch node
      if (hit) {
        // Push the far child first so the near child is processed next;
        // visiting near-to-far shrinks `hitT` early and culls far subtrees.
        int orderNear = dirSign[node.axis];
        int orderFar = 1 - orderNear;
        assert(nodeStackIndex + 2 < kMaxStackDepth); // guard stack overflow
        nodeStack[++nodeStackIndex] = node.data[orderFar];
        nodeStack[++nodeStackIndex] = node.data[orderNear];
      }
    } else { // leaf node
      if (hit) {
        if (TestLeafNode(isect, node, indices_, vertices, faces, ray,
                         epsScale_, options)) {
          // Tighten the ray interval to the nearest hit found so far.
          hitT = isect.t;
        }
      }
    }
  }
  return isect.t < std::numeric_limits<float>::max();
}
// Collect up to `maxIntersections` nearest hits along `ray`.
// Uses a priority queue (furthest kept hit on top) during traversal, then
// drains it into `isects` in front-to-back order. Returns true iff at
// least one hit was found.
bool BVHAccel::MultiHitTraverse(StackVector<Intersection, 128> &isects,
                                int maxIntersections, const float *vertices,
                                const unsigned int *faces, Ray &ray) {
  float hitT = std::numeric_limits<float>::max(); // far = no hit.
  int nodeStackIndex = 0;
  int nodeStack[512];
  nodeStack[0] = 0;
  IsectVector isectPQ;
  isects->clear();
  int dirSign[3];
  dirSign[0] = ray.dir[0] < 0.0 ? 1 : 0;
  dirSign[1] = ray.dir[1] < 0.0 ? 1 : 0;
  dirSign[2] = ray.dir[2] < 0.0 ? 1 : 0;
  // @fixme { Check edge case; i.e., 1/0 }
  float3 rayInvDir;
  rayInvDir[0] = 1.0 / ray.dir[0];
  rayInvDir[1] = 1.0 / ray.dir[1];
  rayInvDir[2] = 1.0 / ray.dir[2];
  float3 rayOrg;
  rayOrg[0] = ray.org[0];
  rayOrg[1] = ray.org[1];
  rayOrg[2] = ray.org[2];
  float minT, maxT;
  while (nodeStackIndex >= 0) {
    int index = nodeStack[nodeStackIndex];
    BVHNode &node = nodes_[index];
    nodeStackIndex--;
    bool hit = IntersectRayAABB(minT, maxT, hitT, node.bmin, node.bmax, rayOrg,
                                rayInvDir, dirSign);
    if (node.flag == 0) { // branch node
      if (hit) {
        // Push far child first so the near child is visited next.
        int orderNear = dirSign[node.axis];
        int orderFar = 1 - orderNear;
        // Traverse near first.
        nodeStack[++nodeStackIndex] = node.data[orderFar];
        nodeStack[++nodeStackIndex] = node.data[orderNear];
      }
    } else { // leaf node
      if (hit) {
        if (MultiHitTestLeafNode(isectPQ, maxIntersections, node, indices_,
                                 vertices, faces, ray, epsScale_)) {
          // Only update `hitT` when queue is full.
          // (until then every hit must be collected, so no culling)
          if (isectPQ.size() >= (size_t)maxIntersections) {
            hitT = isectPQ.top().t;
          }
        }
      }
    }
  }
  assert(nodeStackIndex < kMaxStackDepth);
  if (!isectPQ.empty()) {
    // Store intesection in reverse order(make it frontmost order)
    size_t n = isectPQ.size();
    isects->resize(n);
    for (size_t i = 0; i < n; i++) {
      const Intersection &isect = isectPQ.top();
      isects[n - i - 1] = isect;
      isectPQ.pop();
    }
    return true;
  }
  return false;
}
} // namespace
#endif
#endif // __NANORT_H__
|
callback.h | #define _BSD_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <inttypes.h>
#include <omp.h>
#include <ompt.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
/* Printable names for OMPT enum values, indexed by the enum's numeric
 * value. The leading NULL entries pad index 0 where the enum starts at 1. */
static const char* ompt_thread_type_t_values[] = {
  NULL,
  "ompt_thread_initial",
  "ompt_thread_worker",
  "ompt_thread_other"
};
static const char* ompt_task_status_t_values[] = {
  NULL,
  "ompt_task_complete",
  "ompt_task_yield",
  "ompt_task_cancel",
  "ompt_task_others"
};
/* Indexed manually in on_ompt_callback_cancel(): [0..3] are region kinds,
 * [4..6] are cancellation statuses. */
static const char* ompt_cancel_flag_t_values[] = {
  "ompt_cancel_parallel",
  "ompt_cancel_sections",
  "ompt_cancel_do",
  "ompt_cancel_taskgroup",
  "ompt_cancel_activated",
  "ompt_cancel_detected",
  "ompt_cancel_discarded_task"
};
/* Render the task-type bitmask `type` as a string into `buffer`.
 * The first four kinds print without a leading '|'; modifier flags append
 * with one. `buffer` must be large enough for all flag names combined.
 *
 * Fix: terminate the string up front. The original left `buffer` untouched
 * when no known flag was set, so the caller (print_ids) would printf an
 * uninitialized buffer — undefined behavior (CERT C EXP33-C). */
static void format_task_type(int type, char *buffer) {
  char *progress = buffer;
  *progress = '\0'; /* ensure a valid (possibly empty) C string */
  if (type & ompt_task_initial)
    progress += sprintf(progress, "ompt_task_initial");
  if (type & ompt_task_implicit)
    progress += sprintf(progress, "ompt_task_implicit");
  if (type & ompt_task_explicit)
    progress += sprintf(progress, "ompt_task_explicit");
  if (type & ompt_task_target)
    progress += sprintf(progress, "ompt_task_target");
  if (type & ompt_task_undeferred)
    progress += sprintf(progress, "|ompt_task_undeferred");
  if (type & ompt_task_untied)
    progress += sprintf(progress, "|ompt_task_untied");
  if (type & ompt_task_final)
    progress += sprintf(progress, "|ompt_task_final");
  if (type & ompt_task_mergeable)
    progress += sprintf(progress, "|ompt_task_mergeable");
  if (type & ompt_task_merged)
    progress += sprintf(progress, "|ompt_task_merged");
}
/* OMPT runtime entry points, looked up via ompt_function_lookup_t during
 * tool initialization and used by the callbacks below. */
static ompt_set_callback_t ompt_set_callback;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
/* Query OMPT task info at ancestry `level` and print ids, frames and the
 * formatted task type.
 *
 * Fix: zero-initialize all out-parameters and the format buffer. When no
 * task exists at `level`, ompt_get_task_info() may leave them unset, and
 * the original then read uninitialized `task_type`/`thread_num`/`frame`
 * (undefined behavior; CERT C EXP33-C). Output is unchanged whenever a
 * task exists. */
static void print_ids(int level)
{
  int task_type = 0, thread_num = 0;
  ompt_frame_t *frame = NULL;
  ompt_data_t *task_parallel_data = NULL;
  ompt_data_t *task_data = NULL;
  int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
                                       &task_parallel_data, &thread_num);
  char buffer[2048] = "";
  format_task_type(task_type, buffer);
  if (frame)
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
           "task_type=%s=%d, thread_num=%d\n",
           ompt_get_thread_data()->value, level,
           exists_task ? task_parallel_data->value : 0,
           exists_task ? task_data->value : 0, frame->exit_frame,
           frame->enter_frame, buffer, task_type, thread_num);
}
/* Helpers to print stack-frame and return-address information so the lit
 * tests can match runtime-entry return addresses exactly or fuzzily. */
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
  printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
         ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the comiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts an LD instruction which accounts for another 4 bytes. In contrast to
// X86 this instruction is always there, even for void runtime functions.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
// Prints the address truncated to its "block" (and the previous block, to
// tolerate addresses just past a block boundary).
#define print_fuzzy_address_blocks(addr) \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
         ompt_get_thread_data()->value, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, addr)
/* OMPT callback: a thread starts waiting for a mutex.
 * Prints one line per mutex kind; the exact text is matched by the tests. */
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_kind_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}
/* OMPT callback: a thread has acquired a mutex.
 * Emits one event line per mutex kind; unknown kinds are ignored.
 * (Same output as before; the switch is expressed as an if/else chain.) */
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock) {
    printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  } else if (kind == ompt_mutex_nest_lock) {
    printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  } else if (kind == ompt_mutex_critical) {
    printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  } else if (kind == ompt_mutex_atomic) {
    printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  } else if (kind == ompt_mutex_ordered) {
    printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  }
}
/* OMPT callback: a thread released a mutex.
 * Prints one line per mutex kind; the exact text is matched by the tests. */
static void
on_ompt_callback_mutex_released(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}
/* OMPT callback: nested-lock re-acquire (scope begin) or inner release
 * (scope end). Output strings are unchanged; the switch is expressed as
 * an if/else chain. */
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  }
}
/* OMPT callback: begin/end of a synchronization region (barrier, taskwait,
 * taskgroup). On the end event `parallel_data` may be NULL (e.g. barrier at
 * the end of a parallel region), hence the NULL guards there. */
static void
on_ompt_callback_sync_region(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
      }
      break;
  }
}
/* OMPT callback: begin/end of the *waiting* phase inside a synchronization
 * region. Mirrors on_ompt_callback_sync_region; `parallel_data` may be
 * NULL on end events, hence the NULL guards there. */
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
      }
      break;
  }
}
/* OMPT callback: a flush construct was executed.
 * Reports the flushing thread's id and the runtime return address. */
static void
on_ompt_callback_flush(
  ompt_data_t *thread_data,
  const void *codeptr_ra)
{
  uint64_t tid = thread_data->value;
  printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", tid, codeptr_ra);
}
/* OMPT callback: a cancellation event.
 * `flags` carries one region-kind bit (parallel/sections/do/taskgroup) and
 * one status bit (activated/detected/discarded_task); both are mapped to
 * printable names.
 *
 * Fix: initialize both name pointers. If an unexpected `flags` combination
 * arrives, the original passed an uninitialized pointer to printf("%s")
 * — undefined behavior (CERT C EXP33-C). */
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  const char* first_flag_value = "";
  const char* second_flag_value = "";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_do)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra);
}
/* OMPT callback: a worker thread enters (begin) or leaves (end) its idle
 * state. Output is identical to the original switch-based version. */
static void
on_ompt_callback_idle(
  ompt_scope_endpoint_t endpoint)
{
  if (endpoint == ompt_scope_begin) {
    printf("%" PRIu64 ": ompt_event_idle_begin:\n", ompt_get_thread_data()->value);
  } else if (endpoint == ompt_scope_end) {
    printf("%" PRIu64 ": ompt_event_idle_end:\n", ompt_get_thread_data()->value);
  }
}
/* OMPT callback: begin/end of an implicit (parallel-region) task.
 * On begin, a fresh unique id is stored in task_data; a non-NULL initial
 * task_data->ptr is reported as a runtime bug. On end, `parallel_data`
 * may already be NULL, hence the guard. */
static void
on_ompt_callback_implicit_task(
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    unsigned int team_size,
    unsigned int thread_num)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num);
      break;
  }
}
/* OMPT callback: a lock or nested lock was initialized.
 * Other mutex kinds (critical/atomic/ordered) never see init events and
 * are ignored via the default case. */
static void
on_ompt_callback_lock_init(
  ompt_mutex_kind_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}
/* OMPT callback: a lock or nested lock was destroyed. Other mutex kinds
 * are ignored. Output strings match the original byte-for-byte; the
 * switch is expressed as an if/else chain. */
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  if (kind == ompt_mutex_lock) {
    printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  } else if (kind == ompt_mutex_nest_lock) {
    printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
  }
}
static void
on_ompt_callback_work(
ompt_work_type_t wstype,
ompt_scope_endpoint_t endpoint,
ompt_data_t *parallel_data,
ompt_data_t *task_data,
uint64_t count,
const void *codeptr_ra)
{
// Emit one record per worksharing-construct begin/end event.  The exact
// strings are matched by the test's CHECK lines -- do not reword them.
switch(endpoint)
{
case ompt_scope_begin:
switch(wstype)
{
case ompt_work_loop:
printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_sections:
printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_single_executor:
printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_single_other:
// NOTE(review): prints "task_id=" where sibling cases print
// "parent_task_id=" -- inconsistent, but the CHECK lines expect it.
printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_workshare:
//impl (workshare construct not reported yet)
break;
case ompt_work_distribute:
printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_taskloop:
//impl (count reporting incomplete for taskloop)
printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
}
break;
case ompt_scope_end:
// End events mirror the begin events above, with *_end names.
switch(wstype)
{
case ompt_work_loop:
printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_sections:
printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_single_executor:
printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_single_other:
printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_workshare:
//impl
break;
case ompt_work_distribute:
printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
case ompt_work_taskloop:
//impl
printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
break;
}
break;
}
}
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  // Report master-region entry and exit; other endpoints are ignored.
  if (endpoint == ompt_scope_begin)
  {
    printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
  }
  else if (endpoint == ompt_scope_end)
  {
    printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
  }
}
static void
on_ompt_callback_parallel_begin(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* parallel_data,
  uint32_t requested_team_size,
  ompt_invoker_t invoker,
  const void *codeptr_ra)
{
  // A fresh parallel_data record is expected to arrive zero-initialized.
  if (parallel_data->ptr)
  {
    printf("0: parallel_data initially not null\n");
  }
  parallel_data->value = ompt_get_unique_id();
  uint64_t parent_task_id = encountering_task_data->value;
  uint64_t parallel_id = parallel_data->value;
  printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n", ompt_get_thread_data()->value, parent_task_id, encountering_task_frame->exit_frame, encountering_task_frame->enter_frame, parallel_id, requested_team_size, codeptr_ra, invoker);
}
static void
on_ompt_callback_parallel_end(
  ompt_data_t *parallel_data,
  ompt_data_t *encountering_task_data,
  ompt_invoker_t invoker,
  const void *codeptr_ra)
{
  // One record per region exit, mirroring on_ompt_callback_parallel_begin.
  uint64_t parallel_id = parallel_data->value;
  uint64_t task_id = encountering_task_data->value;
  printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_id, task_id, invoker, codeptr_ra);
}
static void
on_ompt_callback_task_create(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* new_task_data,
  int type,
  int has_dependences,
  const void *codeptr_ra)
{
  // new_task_data must arrive zeroed; give the new task a unique id.
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];
  // render the task-type bit set as text for the record below
  format_task_type(type, buffer);
  //there is no parallel_begin callback for implicit parallel region
  //thus it is initialized in initial task
  if(type & ompt_task_initial)
  {
    ompt_data_t *parallel_data;
    // FIX: source was corrupted to "&para;llel_data" (HTML-entity
    // mangling); restore the address-of expression.
    ompt_get_parallel_info(0, &parallel_data, NULL);
    if(parallel_data->ptr)
      printf("%s\n", "0: parallel_data initially not null");
    parallel_data->value = ompt_get_unique_id();
  }
  // The initial task has no encountering task or frame, hence the null
  // guards in the argument list.
  printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame : NULL, encountering_task_frame ? encountering_task_frame->enter_frame : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no");
}
static void
on_ompt_callback_task_schedule(
  ompt_data_t *first_task_data,
  ompt_task_status_t prior_task_status,
  ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status);
  // A completed prior task additionally produces a task_end record.
  if (prior_task_status != ompt_task_complete)
  {
    return;
  }
  printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value);
}
static void
on_ompt_callback_task_dependences(
  ompt_data_t *task_data,
  const ompt_task_dependence_t *deps,
  int ndeps)
{
  // Report the whole dependence list of a task as a single record.
  uint64_t thread_id = ompt_get_thread_data()->value;
  uint64_t task_id = task_data->value;
  printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", thread_id, task_id, (void *)deps, ndeps);
}
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  // One record per dependence edge between two tasks.
  uint64_t src_task_id = first_task_data->value;
  uint64_t sink_task_id = second_task_data->value;
  printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, src_task_id, sink_task_id);
}
static void
on_ompt_callback_thread_begin(
  ompt_thread_type_t thread_type,
  ompt_data_t *thread_data)
{
  // thread_data must come in zeroed; assign the thread a fresh unique id.
  if (thread_data->ptr)
  {
    printf("%s\n", "0: thread_data initially not null");
  }
  thread_data->value = ompt_get_unique_id();
  uint64_t thread_id = thread_data->value;
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_type_t_values[thread_type], thread_type, thread_id);
}
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  // Final record for a runtime thread.
  uint64_t thread_id = thread_data->value;
  printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_id);
}
static int
on_ompt_callback_control_tool(
uint64_t command,
uint64_t modifier,
void *arg,
const void *codeptr_ra)
{
ompt_frame_t* omptTaskFrame;
// Query only the frame of the current task (depth 0); all other
// out-parameters of ompt_get_task_info are not needed here.
ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame, omptTaskFrame->enter_frame);
return 0; //success
}
// Register handler on_<name> for OMPT event <name>, using callback
// signature <type>; print a diagnostic (which the test checks for) if
// the runtime reports it will never dispatch the event.
#define register_callback_t(name, type) \
do{ \
type f_##name = &on_##name; \
if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \
ompt_set_never) \
printf("0: Could not register callback '" #name "'\n"); \
}while(0)
// Common case: the callback's type is named <event>_t.
#define register_callback(name) register_callback_t(name, name##_t)
int ompt_initialize(
ompt_function_lookup_t lookup,
ompt_data_t *tool_data)
{
// Resolve every OMPT runtime entry point this tool uses through the
// lookup function supplied by the runtime.
ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
// Register a handler for every event under test.  register_callback_t
// is used where the handler's signature type differs from <event>_t
// (e.g. acquired/released share ompt_callback_mutex_t).
register_callback(ompt_callback_mutex_acquire);
register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
register_callback(ompt_callback_nest_lock);
register_callback(ompt_callback_sync_region);
register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
register_callback(ompt_callback_control_tool);
register_callback(ompt_callback_flush);
register_callback(ompt_callback_cancel);
register_callback(ompt_callback_idle);
register_callback(ompt_callback_implicit_task);
register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
register_callback(ompt_callback_work);
register_callback(ompt_callback_master);
register_callback(ompt_callback_parallel_begin);
register_callback(ompt_callback_parallel_end);
register_callback(ompt_callback_task_create);
register_callback(ompt_callback_task_schedule);
register_callback(ompt_callback_task_dependences);
register_callback(ompt_callback_task_dependence);
register_callback(ompt_callback_thread_begin);
register_callback(ompt_callback_thread_end);
// Print the platform's null-pointer spelling so the test's CHECK lines
// can match pointer output portably.
printf("0: NULL_POINTER=%p\n", (void*)NULL);
return 1; //success
}
// Tool finalizer: invoked once at runtime shutdown.
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: ompt_event_runtime_shutdown\n");
}
// Entry point the OpenMP runtime looks up to activate this tool; the
// returned struct supplies the initializer/finalizer pair.
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
// trailing 0 initializes the (unused) tool_data field
static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
return &ompt_start_tool_result;
}
|
omp-taskgroup-single.c | #include <omp.h>
#include <unistd.h>
#include <stdio.h>
#define THREADS 4
#define LEN 25
int main(void)
{
  int j=0;
  // One thread of the team executes the single region; it runs three
  // consecutive taskgroups, each spawning LEN tasks and waiting for
  // them at the taskgroup's end.
#pragma omp parallel num_threads(THREADS)
#pragma omp single
  {
    for (int round = 0; round < 3; round++)
    {
#pragma omp taskgroup
      for (j=0; j<LEN; j++)
      {
#pragma omp task
        {usleep(10);}
      }
    }
  }
  return 0;
}
|
GB_binop__lxor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint8)
// A*D function (colscale): GB (_AxD__lxor_uint8)
// D*A function (rowscale): GB (_DxB__lxor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint8)
// C=scalar+B GB (_bind1st__lxor_uint8)
// C=scalar+B' GB (_bind1st_tran__lxor_uint8)
// C=A+scalar GB (_bind2nd__lxor_uint8)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT8 || GxB_NO_LXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the lxor op, all three matrices dense; the loop body is
// supplied by the included template (this file is auto-generated).
void GB (_Cdense_ewise3_noaccum__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time: caller falls back to generic
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned;
// harmless code-generator artifact, do not hand-edit.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C shares A's pattern; the template writes lxor results into Cx
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta stand in for entries missing from A or B, but only in
// eWiseUnion mode -- they are read nowhere else.
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C sparse/hyper; body supplied by template.
GrB_Info GB (_AemultB_08__lxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for lxor (commutative), so only the #else branch
// below is compiled for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = lxor (x, Bx [p]) for every entry present in B's bitmap.
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Bx = (uint8_t *) Bx_input ;
uint8_t x = (*((uint8_t *) x_input)) ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// entries absent from the bitmap Bb are left untouched
if (GBB (Bb, pB))
{
uint8_t b = GBX (Bx, pB, false) ;
Cx [pB] = ((x != 0) != (b != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = lxor (Ax [p], y) for every entry present in A's bitmap.
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// entries absent from the bitmap Ab are left untouched
if (GBB (Ab, pA))
{
uint8_t a = GBX (Ax, pA, false) ;
Cx [pA] = ((a != 0) != (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code compiled after this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gamma_index_ivfpq.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include <unistd.h>
#include <atomic>
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/VectorTransform.h"
#include "faiss/IndexHNSW.h"
#include "faiss/InvertedLists.h"
#include "faiss/impl/FaissAssert.h"
#include "faiss/impl/io.h"
#include "faiss/index_io.h"
#include "faiss/utils/Heap.h"
#include "faiss/utils/distances.h"
#include "faiss/utils/hamming.h"
#include "faiss/utils/utils.h"
#include "table/field_range_index.h"
#include "common/gamma_common_data.h"
#include "gamma_index_flat.h"
#include "gamma_scanner.h"
#include "util/log.h"
#include "vector/memory_raw_vector.h"
#include "vector/raw_vector.h"
#include "realtime/realtime_invert_index.h"
#include "index/retrieval_model.h"
#include "util/utils.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
struct IndexIVFPQStats {
  size_t nrefine; // nb of refines (IVFPQR)
  size_t n_hamming_pass;
  // nb of passed Hamming distance tests (for polysemous)
  // timings measured with the CPU RTC
  // on all threads
  size_t search_cycles;
  size_t refine_cycles; // only for IVFPQR
  IndexIVFPQStats() { reset(); }
  // Zero all counters.  The previous reset() was an empty body, so the
  // constructor left every counter uninitialized and reading the stats
  // was undefined behavior.
  void reset() {
    nrefine = 0;
    n_hamming_pass = 0;
    search_cycles = 0;
    refine_cycles = 0;
  }
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
// Read the CPU time-stamp counter (x86-64 only) for the cycle
// statistics below; on other architectures it always returns 0.
static uint64_t get_cycles() {
#ifdef __x86_64__
uint32_t high, low;
// NOTE(review): rdtsc is not a serializing instruction, so the counts
// are approximate -- fine for coarse statistics.
asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
return ((uint64_t)high << 32) | (low);
#else
return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
/** QueryTables manages the various ways of searching an
* IndexIVFPQ. The code contains a lot of branches, depending on:
* - metric_type: are we computing L2 or Inner product similarity?
* - by_residual: do we encode raw vectors or residuals?
* - use_precomputed_table: are x_R|x_C tables precomputed?
* - polysemous_ht: are we filtering with polysemous codes?
*/
struct QueryTables {
/*****************************************************
* General data from the IVFPQ
*****************************************************/
const faiss::IndexIVFPQ &ivfpq;
const faiss::IVFSearchParameters *params;
// copied from IndexIVFPQ for easier access
int d;
const faiss::ProductQuantizer &pq;
faiss::MetricType metric_type;
bool by_residual;
int use_precomputed_table;
int polysemous_ht;
// pre-allocated data buffers
float *sim_table, *sim_table_2;
float *residual_vec, *decoded_vec;
// single data buffer
std::vector<float> mem;
// for table pointers
std::vector<const float *> sim_table_ptrs;
// Size the shared work buffer and carve it into the two similarity
// tables and the two work vectors; pick up polysemous settings from
// the index or, if given, from the per-query search parameters.
explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
const faiss::IVFSearchParameters *params,
faiss::MetricType metric_type)
: ivfpq(ivfpq),
// FIX: the 'params' member was never initialized (the constructor
// argument was only used for the dynamic_cast below), so reading the
// member later was undefined behavior.
params(params),
d(ivfpq.d),
pq(ivfpq.pq),
metric_type(metric_type),
by_residual(ivfpq.by_residual),
use_precomputed_table(ivfpq.use_precomputed_table) {
// one contiguous allocation backs sim_table, sim_table_2,
// residual_vec and decoded_vec, in that order
mem.resize(pq.ksub * pq.M * 2 + d * 2);
sim_table = mem.data();
sim_table_2 = sim_table + pq.ksub * pq.M;
residual_vec = sim_table_2 + pq.ksub * pq.M;
decoded_vec = residual_vec + d;
// for polysemous
polysemous_ht = ivfpq.polysemous_ht;
if (auto ivfpq_params =
dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
polysemous_ht = ivfpq_params->polysemous_ht;
}
if (polysemous_ht != 0) {
q_code.resize(pq.code_size);
}
init_list_cycles = 0;
sim_table_ptrs.resize(pq.M);
}
/*****************************************************
* What we do when query is known
*****************************************************/
// field specific to query
const float *qi;
// query-specific intialization
void init_query(const float *qi) {
  // Remember the query, then build the metric-specific tables.
  this->qi = qi;
  if (metric_type != faiss::METRIC_INNER_PRODUCT) {
    init_query_L2();
  } else {
    init_query_IP();
  }
  // polysemous filtering on raw (non-residual) codes needs the
  // query's own PQ code
  if (!by_residual && polysemous_ht != 0) {
    pq.compute_code(qi, q_code.data());
  }
}
// Inner-product search: one <q, codeword> table serves every list.
void init_query_IP() {
// precompute some tables specific to the query qi
pq.compute_inner_prod_table(qi, sim_table);
}
// L2 search: direct encoding fills sim_table now; residual encoding
// with precomputed tables stores <q, codeword> in sim_table_2 for
// later combination in precompute_list_tables_L2().
void init_query_L2() {
if (!by_residual) {
pq.compute_distance_table(qi, sim_table);
} else if (use_precomputed_table) {
pq.compute_inner_prod_table(qi, sim_table_2);
}
// NOTE(review): by_residual without precomputed tables builds its
// table per inverted list instead -- nothing to do here.
}
/*****************************************************
* When inverted list is known: prepare computations
*****************************************************/
// fields specific to list
long key; // id of the current inverted list (coarse centroid)
float coarse_dis; // query-to-centroid distance (or IP) for that list
std::vector<uint8_t> q_code; // query/residual PQ code (polysemous only)
uint64_t init_list_cycles; // RTC cycles spent in the precompute_* calls
/// once we know the query and the centroid, we can prepare the
/// sim_table that will be used for accumulation
/// and dis0, the initial value
float precompute_list_tables() {
float dis0 = 0;
uint64_t t0;
TIC;
// without residual encoding, the tables built in init_query() are
// already list-independent -- nothing to do
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
dis0 = precompute_list_tables_IP();
else
dis0 = precompute_list_tables_L2();
}
init_list_cycles += TOC;
return dis0;
}
// Variant that only sets up table pointers (type-2 precomputed
// tables); not implemented for the inner-product metric.
float precompute_list_table_pointers() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
FAISS_THROW_MSG("not implemented");
else
dis0 = precompute_list_table_pointers_L2();
}
init_list_cycles += TOC;
return dis0;
}
/*****************************************************
* compute tables for inner prod
*****************************************************/
// Returns dis0 = <q, centroid>; the per-subquantizer terms from
// init_query_IP()'s sim_table are added during list scanning.
float precompute_list_tables_IP() {
// prepare the sim_table that will be used for accumulation
// and dis0, the initial value
ivfpq.quantizer->reconstruct(key, decoded_vec);
// decoded_vec = centroid
float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);
if (polysemous_ht) {
// the polysemous filter code is computed on the residual q - centroid
for (int i = 0; i < d; i++) {
residual_vec[i] = qi[i] - decoded_vec[i];
}
pq.compute_code(residual_vec, q_code.data());
}
return dis0;
}
/*****************************************************
* compute tables for L2 distance
*****************************************************/
// L2: build sim_table (and dis0) for the current list.  Strategy depends on
// use_precomputed_table:
//   0 / -1 : compute the residual explicitly and take its distance table;
//   1      : sim_table = precomputed_table[key] - 2 * sim_table_2
//            (combined with fvec_madd), dis0 = coarse distance;
//   2      : like 1, but the coarse quantizer is a MultiIndexQuantizer, so
//            the table is assembled per coarse sub-quantizer from `key`.
float precompute_list_tables_L2() {
  float dis0 = 0;
  if (use_precomputed_table == 0 || use_precomputed_table == -1) {
    ivfpq.quantizer->compute_residual(qi, residual_vec, key);
    pq.compute_distance_table(residual_vec, sim_table);
    if (polysemous_ht != 0) {
      pq.compute_code(residual_vec, q_code.data());
    }
  } else if (use_precomputed_table == 1) {
    dis0 = coarse_dis;
    // sim_table = precomputed_table[key] + (-2.0) * sim_table_2
    faiss::fvec_madd(pq.M * pq.ksub,
                     &ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
                     sim_table_2, sim_table);
    if (polysemous_ht != 0) {
      // the residual is still needed to PQ-encode the query for Hamming
      ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      pq.compute_code(residual_vec, q_code.data());
    }
  } else if (use_precomputed_table == 2) {
    dis0 = coarse_dis;
    const faiss::MultiIndexQuantizer *miq =
        dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
    FAISS_THROW_IF_NOT(miq);
    const faiss::ProductQuantizer &cpq = miq->pq;
    // number of fine (PQ) sub-quantizers per coarse sub-quantizer
    int Mf = pq.M / cpq.M;
    const float *qtab = sim_table_2;  // query-specific table
    float *ltab = sim_table;          // (output) list-specific table
    long k = key;
    for (size_t cm = 0; cm < cpq.M; cm++) {
      // compute PQ index: extract this sub-quantizer's bits from the key
      int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
      k >>= cpq.nbits;
      // get corresponding table
      const float *pc =
          &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
      if (polysemous_ht == 0) {
        // sum up with query-specific table
        faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
        ltab += Mf * pq.ksub;
        qtab += Mf * pq.ksub;
      } else {
        // also record the per-sub-quantizer argmin: the query's code
        for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
          q_code[m] =
              faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
          pc += pq.ksub;
          ltab += pq.ksub;
          qtab += pq.ksub;
        }
      }
    }
  }
  return dis0;
}
// L2, pointer mode: fill sim_table_ptrs with pointers into the precomputed
// table(s) instead of copying a per-list table.  Requires
// use_precomputed_table to be 1 or 2; polysemous filtering is unsupported.
float precompute_list_table_pointers_L2() {
  float dis0 = 0;
  if (use_precomputed_table == 1) {
    dis0 = coarse_dis;
    const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
    for (size_t m = 0; m < pq.M; m++) {
      sim_table_ptrs[m] = s;
      s += pq.ksub;
    }
  } else if (use_precomputed_table == 2) {
    dis0 = coarse_dis;
    // coarse quantizer is a MultiIndexQuantizer: select tables per coarse
    // sub-quantizer using the bit fields of `key`
    const faiss::MultiIndexQuantizer *miq =
        dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
    FAISS_THROW_IF_NOT(miq);
    const faiss::ProductQuantizer &cpq = miq->pq;
    int Mf = pq.M / cpq.M;  // fine sub-quantizers per coarse sub-quantizer
    long k = key;
    int m0 = 0;
    for (size_t cm = 0; cm < cpq.M; cm++) {
      int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
      k >>= cpq.nbits;
      const float *pc =
          &ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
      for (int m = m0; m < m0 + Mf; m++) {
        sim_table_ptrs[m] = pc;
        pc += pq.ksub;
      }
      m0 += Mf;
    }
  } else {
    FAISS_THROW_MSG("need precomputed tables");
  }
  if (polysemous_ht) {
    FAISS_THROW_MSG("not implemented");
    // Not clear that it makes sense to implement this,
    // because it costs M * ksub, which is what we wanted to
    // avoid with the tables pointers.
  }
  return dis0;
}
};
// Collects scan results into a top-k heap (C is the heap comparator).
// Plain aggregate: initialized with brace syntax by the scanners.
template <class C>
struct KnnSearchResults {
  idx_t key;
  const idx_t *ids;

  // heap params
  size_t k;
  float *heap_sim;
  idx_t *heap_ids;
  size_t nup;

  // Insert (j, dis) if it beats the current worst heap entry.  When `ids`
  // is null (store_pairs mode), the id encodes (list key, offset j).
  inline void add(idx_t j, float dis) {
    if (!C::cmp(heap_sim[0], dis)) return;
    const idx_t id = ids ? ids[j] : (key << 32 | j);
    faiss::heap_pop<C>(k, heap_sim, heap_ids);
    faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
    nup++;
  }
};
/*****************************************************
 * Scanning the codes.
 * The scanning functions call their favorite precompute_*
 * function to precompute the tables they need.
 *****************************************************/
// Core IVFPQ list scanner.  IDType is the stored id type; METRIC_TYPE picks
// IP vs L2 at compile time.  Inherits the query/list table machinery from
// QueryTables.
template <typename IDType, faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
  const uint8_t *list_codes;
  const IDType *list_ids;
  size_t list_size;

  explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
                         const faiss::IVFSearchParameters *params)
      : QueryTables(ivfpq, params, METRIC_TYPE) {
    // the scan loops below read exactly one byte per sub-quantizer
    FAISS_THROW_IF_NOT(pq.nbits == 8);
  }

  // distance term common to every code of the current list
  float dis0;

  // mode 2: fully precomputed tables; mode 1: table pointers only
  void init_list(idx_t list_no, float coarse_dis, int mode) {
    this->key = list_no;
    this->coarse_dis = coarse_dis;
    if (mode == 2) {
      dis0 = precompute_list_tables();
    } else if (mode == 1) {
      dis0 = precompute_list_table_pointers();
    }
  }

  /// tables are not precomputed, but pointers are provided to the
  /// relevant X_c|x_r tables
  template <class SearchResultType>
  void scan_list_with_pointer(size_t ncode, const uint8_t *codes,
                              SearchResultType &res) const {
    for (size_t j = 0; j < ncode; j++) {
      float dis = dis0;
      const float *tab = sim_table_2;
      for (size_t m = 0; m < pq.M; m++) {
        int ci = *codes++;
        dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
        tab += pq.ksub;
      }
      res.add(j, dis);
    }
  }

  /// nothing is precomputed: access residuals on-the-fly
  template <class SearchResultType>
  void scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    const float *dvec;
    float dis0 = 0;  // NOTE: local, deliberately shadows the member dis0
    if (by_residual) {
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        ivfpq.quantizer->reconstruct(key, residual_vec);
        dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
      } else {
        ivfpq.quantizer->compute_residual(qi, residual_vec, key);
      }
      dvec = residual_vec;
    } else {
      dvec = qi;
      dis0 = 0;
    }
    for (size_t j = 0; j < ncode; j++) {
      // decode the PQ code into a full vector, then measure directly
      pq.decode(codes, decoded_vec);
      codes += pq.code_size;
      float dis;
      if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
        dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
      } else {
        dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
      }
      res.add(j, dis);
    }
  }

  /*****************************************************
   * Scanning codes with polysemous filtering
   *****************************************************/

  // Only compute the table-based distance for codes whose Hamming distance
  // to the query's code is below the polysemous threshold ht.
  template <class HammingComputer, class SearchResultType>
  void scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
                               SearchResultType &res) const {
    int ht = ivfpq.polysemous_ht;
    size_t n_hamming_pass = 0;
    int code_size = pq.code_size;
    HammingComputer hc(q_code.data(), code_size);
    for (size_t j = 0; j < ncode; j++) {
      const uint8_t *b_code = codes;
      int hd = hc.hamming(b_code);
      if (hd < ht) {
        n_hamming_pass++;
        float dis = dis0;
        const float *tab = sim_table;
        for (size_t m = 0; m < pq.M; m++) {
          dis += tab[*b_code++];
          tab += pq.ksub;
        }
        res.add(j, dis);
      }
      codes += code_size;
    }
    // stats counter is shared across OMP threads
#pragma omp critical
    { indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
  }

  // Dispatch to a HammingComputer specialized for the code size.
  template <class SearchResultType>
  void scan_list_polysemous(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs) \
  case cs: \
    scan_list_polysemous_hc<faiss::HammingComputer##cs, SearchResultType>( \
        ncode, codes, res); \
    break
      HANDLE_CODE_SIZE(4);
      HANDLE_CODE_SIZE(8);
      HANDLE_CODE_SIZE(16);
      HANDLE_CODE_SIZE(20);
      HANDLE_CODE_SIZE(32);
      HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
      default:
        if (pq.code_size % 8 == 0)
          scan_list_polysemous_hc<faiss::HammingComputerM8, SearchResultType>(
              ncode, codes, res);
        else
          scan_list_polysemous_hc<faiss::HammingComputerM4, SearchResultType>(
              ncode, codes, res);
        break;
    }
  }
};
/* struct GammaInvertedListScanner : faiss::InvertedListScanner { */
/* GammaInvertedListScanner() { retrieval_context_ = nullptr; } */
/* virtual size_t scan_codes_pointer(size_t ncode, const uint8_t **codes, */
/* const idx_t *ids, float *heap_sim, */
/* idx_t *heap_ids, size_t k) = 0; */
/* void set_search_context(RetrievalContext *retrieval_context) { */
/* this->retrieval_context_ = retrieval_context; */
/* } */
/* RetrievalContext *retrieval_context_; */
/* }; */
// "IVF flat" scanner over gamma's RawVector storage: instead of decoding PQ
// codes, vectors are fetched by id from the raw store and compared directly.
template <faiss::MetricType metric, class C>
struct GammaIVFFlatScanner : GammaInvertedListScanner {
  size_t d;  // vector dimension
  GammaIVFFlatScanner(size_t d) : d(d) {}

  const float *xi;  // current query (not owned)
  void set_query(const float *query) override { this->xi = query; }

  idx_t list_no;  // current inverted list
  void set_list(idx_t list_no, float /* coarse_dis */) override {
    this->list_no = list_no;
  }

  // distance between the query and one raw (uncompressed) vector
  float distance_to_code(const uint8_t *code) const override {
    const float *yj = (float *)code;
    float dis = metric == faiss::METRIC_INNER_PRODUCT
                    ? faiss::fvec_inner_product(xi, yj, d)
                    : faiss::fvec_L2sqr(xi, yj, d);
    return dis;
  }

  // Scan one list: skip deleted/filtered ids, fetch each vector from the
  // raw store, compute its distance and maintain the top-k heap.
  // NOTE(review): `codes` is reinterpreted as a RawVector* -- assumes the
  // caller passes the vector store, not codes; verify against call sites.
  inline size_t scan_codes(size_t list_size, const uint8_t *codes,
                           const idx_t *ids, float *simi, idx_t *idxi,
                           size_t k) const override {
    RawVector *raw_vec = (RawVector *)codes;
    size_t nup = 0;
    for (size_t j = 0; j < list_size; j++) {
      if (ids[j] & realtime::kDelIdxMask) continue;  // deleted doc
      idx_t vid = ids[j] & realtime::kRecoverIdxMask;
      if (vid < 0) continue;
      if (retrieval_context_->IsValid(vid) == false) continue;  // filtered
      ScopeVector svec;
      raw_vec->GetVector(vid, svec);
      const float *yj = (const float *)svec.Get();
      float dis = metric == faiss::METRIC_INNER_PRODUCT
                      ? faiss::fvec_inner_product(xi, yj, d)
                      : faiss::fvec_L2sqr(xi, yj, d);
      if (retrieval_context_->IsSimilarScoreValid(dis) && C::cmp(simi[0], dis)) {
        faiss::heap_pop<C>(k, simi, idxi);
        faiss::heap_push<C>(k, simi, idxi, dis, vid);
        nup++;
      }
    }
    return nup;
  }

  // not used by the flat scanner; only the PQ scanner implements this
  size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                            const idx_t *ids, float *heap_sim, idx_t *heap_ids,
                            size_t k) {
    return 0;
  }
};
// Search-time parameters for the IVFPQ retrieval model.
class IVFPQRetrievalParameters : public RetrievalParameters {
 public:
  // Defaults: parallelize over queries, recall 100, nprobe 80, PQ scan.
  IVFPQRetrievalParameters()
      : RetrievalParameters(),
        parallel_on_queries_(true),
        recall_num_(100),
        nprobe_(80),
        ivf_flat_(false) {}

  IVFPQRetrievalParameters(bool parallel_on_queries, int recall_num, int nprobe,
                           enum DistanceComputeType type, bool ivf_flat)
      : parallel_on_queries_(parallel_on_queries),
        recall_num_(recall_num),
        nprobe_(nprobe),
        ivf_flat_(ivf_flat) {
    distance_compute_type_ = type;  // inherited from RetrievalParameters
  }

  IVFPQRetrievalParameters(enum DistanceComputeType type)
      : parallel_on_queries_(true),
        recall_num_(100),
        nprobe_(80),
        ivf_flat_(false) {
    distance_compute_type_ = type;
  }

  virtual ~IVFPQRetrievalParameters() {}

  int RecallNum() { return recall_num_; }
  void SetRecallNum(int recall_num) { recall_num_ = recall_num; }

  int Nprobe() { return nprobe_; }
  void SetNprobe(int nprobe) { nprobe_ = nprobe; }

  bool ParallelOnQueries() { return parallel_on_queries_; }
  void SetParallelOnQueries(bool parallel_on_queries) {
    parallel_on_queries_ = parallel_on_queries;
  }

  bool IvfFlat() { return ivf_flat_; }
  void SetIvfFlat(bool ivf_flat) { ivf_flat_ = ivf_flat; }

 protected:
  // parallelize over queries or ivf lists
  bool parallel_on_queries_;
  int recall_num_;  // number of candidates to recall
  int nprobe_;      // number of inverted lists to visit
  bool ivf_flat_;   // use the flat (uncompressed) scanner instead of PQ
};
struct IVFPQModelParams;
// IVFPQ index backed by gamma's realtime inverted index, with
// delete/update support and an optional OPQ transform.  Combines
// faiss::IndexIVFPQ with the gamma FLAT index interface.
struct GammaIVFPQIndex : GammaFLATIndex, faiss::IndexIVFPQ {
  GammaIVFPQIndex();
  virtual ~GammaIVFPQIndex();

  faiss::InvertedListScanner *get_InvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  GammaInvertedListScanner *GetGammaIVFFlatScanner(
      size_t d, faiss::MetricType metric_type);

  GammaInvertedListScanner *GetGammaInvertedListScanner(
      bool store_pairs, faiss::MetricType metric_type);

  int Init(const std::string &model_parameters, int indexing_size) override;
  RetrievalParameters *Parse(const std::string &parameters) override;
  int Indexing() override;

  bool Add(int n, const uint8_t *vec);

  int Update(const std::vector<int64_t> &ids,
             const std::vector<const uint8_t *> &vecs);

  // assign the vectors, then call search_preassign
  int Search(RetrievalContext *retrieval_context, int n, const uint8_t *x,
             int k, float *distances, idx_t *labels);

  void search_preassigned(RetrievalContext *retrieval_context, int n,
                          const float *x, const float *applied_x, int k,
                          const idx_t *keys, const float *coarse_dis,
                          float *distances, idx_t *labels, int nprobe,
                          bool store_pairs,
                          const faiss::IVFSearchParameters *params = nullptr);

  void search_ivf_flat(RetrievalContext *retrieval_context, int n,
                       const float *x, int k, const idx_t *keys,
                       const float *coarse_dis, float *distances,
                       idx_t *labels, int nprobe, bool store_pairs,
                       const faiss::IVFSearchParameters *params = nullptr);

  long GetTotalMemBytes() override {
    if (!rt_invert_index_ptr_) {
      return 0;
    }
    return rt_invert_index_ptr_->GetTotalMemBytes();
  }

  int Dump(const std::string &dir) override;
  int Load(const std::string &index_dir) override;

  virtual void copy_subset_to(faiss::IndexIVF &other, int subset_type,
                              idx_t a1, idx_t a2) const;

  int Delete(const std::vector<int64_t> &ids);

  int indexed_vec_count_;                         // #vectors already indexed
  realtime::RTInvertIndex *rt_invert_index_ptr_;  // realtime inverted lists
  bool compaction_;
  size_t compact_bucket_no_;
  uint64_t compacted_num_;
  uint64_t updated_num_;
  int d_;                                         // vector dimension
  DistanceComputeType metric_type_;
  faiss::VectorTransform *opq_;                   // optional OPQ transform
  // 0 is FlatL2, 1 is HNSWFlat
  int quantizer_type_;
#ifdef PERFORMANCE_TESTING
  std::atomic<uint64_t> search_count_;
  int add_count_;
#endif
  IVFPQModelParams *model_param_;
};
// Gamma's IVFPQ list scanner.  precompute_mode selects the scan strategy:
// 2 = fully precomputed tables, 1 = table pointers, 0 = on-the-fly decode.
template <faiss::MetricType METRIC_TYPE, class C, int precompute_mode>
struct GammaIVFPQScanner : IVFPQScannerT<idx_t, METRIC_TYPE>,
                           GammaInvertedListScanner {
  const GammaIVFPQIndex &gamma_ivfpq_;
  bool store_pairs_;  // report (list,offset)-encoded ids instead of labels

  GammaIVFPQScanner(const GammaIVFPQIndex &gamma_ivfpq, bool store_pairs)
      : IVFPQScannerT<idx_t, METRIC_TYPE>(gamma_ivfpq, nullptr),
        gamma_ivfpq_(gamma_ivfpq) {
    store_pairs_ = store_pairs;
  }

  // Table-based scan that also skips deleted and context-filtered ids.
  template <class SearchResultType>
  void scan_list_with_table(size_t ncode, const uint8_t *codes,
                            SearchResultType &res) const {
    size_t j = 0;
    for (; j < ncode; j++) {
      if (res.ids[j] & realtime::kDelIdxMask) {  // deleted doc
        codes += this->pq.M;  // still advance past its code
        continue;
      }
      if (!retrieval_context_->IsValid(res.ids[j] &
                                       realtime::kRecoverIdxMask)) {
        codes += this->pq.M;  // filtered out by the retrieval context
        continue;
      }
      float dis = this->dis0;
      const float *tab = this->sim_table;
      // one table lookup per sub-quantizer (pq.nbits == 8: 1 byte each)
      for (size_t m = 0; m < this->pq.M; m++) {
        dis += tab[*codes++];
        tab += this->pq.ksub;
      }
      res.add(j, dis);
    }
    assert(j == ncode);
  }

  inline void set_query(const float *query) override {
    this->init_query(query);
  }

  inline void set_list(idx_t list_no, float coarse_dis) override {
    this->init_list(list_no, coarse_dis, precompute_mode);
  }

  // distance of a single code; only valid with fully precomputed tables
  inline float distance_to_code(const uint8_t *code) const override {
    assert(precompute_mode == 2);
    float dis = this->dis0;
    const float *tab = this->sim_table;
    for (size_t m = 0; m < this->pq.M; m++) {
      dis += tab[*code++];
      tab += this->pq.ksub;
    }
    return dis;
  }

  // Scan ncode codes, updating the caller's top-k heap.
  // NOTE(review): always returns 0, not res.nup -- callers must not rely
  // on the return value; confirm against call sites.
  inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
                           float *heap_sim, idx_t *heap_ids,
                           size_t k) const override {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (this->polysemous_ht > 0) {
      assert(precompute_mode == 2);
      this->scan_list_polysemous(ncode, codes, res);
    } else if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else if (precompute_mode == 1) {
      this->scan_list_with_pointer(ncode, codes, res);
    } else if (precompute_mode == 0) {
      this->scan_on_the_fly_dist(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }

  // Variant taking an array of code pointers; only mode 2 is handled.
  // NOTE(review): passes `codes` (uint8_t**) where scan_list_with_table
  // expects const uint8_t*; would not compile if instantiated -- verify
  // whether this overload is ever called.
  inline size_t scan_codes_pointer(size_t ncode, const uint8_t **codes,
                                   const idx_t *ids, float *heap_sim,
                                   idx_t *heap_ids, size_t k) {
    KnnSearchResults<C> res = {/* key */ this->key,
                               /* ids */ this->store_pairs_ ? nullptr : ids,
                               /* k */ k,
                               /* heap_sim */ heap_sim,
                               /* heap_ids */ heap_ids,
                               /* nup */ 0};
    if (precompute_mode == 2) {
      this->scan_list_with_table(ncode, codes, res);
    } else {
      FAISS_THROW_MSG("bad precomp mode");
    }
    return 0;
  }
};
} // namespace tig_gamma
#endif
|
openmp_wrapper.h | /*!
* Copyright (c) 2017 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP
#include <omp.h>
#include <LightGBM/utils/log.h>
#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
// Returns the number of threads OpenMP would use for a parallel region,
// determined by actually opening a region and reading the count from the
// master thread.  Falls back to 1 if the region does not execute.
inline int OMP_NUM_THREADS() {
  int ret = 1;
#pragma omp parallel
#pragma omp master
  { ret = omp_get_num_threads(); }
  return ret;
}
/*! Captures the first exception thrown inside an OpenMP worker so it can be
 *  rethrown on the calling thread (exceptions must not escape a parallel
 *  region).  ReThrow() is invoked explicitly via OMP_THROW_EX() and again,
 *  defensively, from the destructor. */
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() : ex_ptr_(nullptr) {}

  ~ThreadExceptionHelper() { ReThrow(); }

  /*! Rethrow the captured exception, if any. */
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      std::rethrow_exception(ex_ptr_);
    }
  }

  /*! Record the exception currently being handled; only the first wins. */
  void CaptureException() {
    if (ex_ptr_ != nullptr) { return; }  // fast path without the lock
    std::lock_guard<std::mutex> guard(lock_);
    if (ex_ptr_ != nullptr) { return; }  // re-check under the lock
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;
  std::mutex lock_;
};
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END() \
} \
catch (std::exception & ex) { \
Log::Warning(ex.what()); \
omp_except_helper.CaptureException(); \
} \
catch (...) { \
omp_except_helper.CaptureException(); \
}
#define OMP_THROW_EX() omp_except_helper.ReThrow()
#else
#ifdef _MSC_VER
#pragma warning(disable : 4068) // disable unknown pragma warning
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** Fall here if no OPENMP support, so just
simulate a single thread running.
All #pragma omp should be ignored by the compiler **/
inline void omp_set_num_threads(int) {}
inline int omp_get_num_threads() {return 1;}
inline int omp_get_thread_num() {return 0;}
inline int OMP_NUM_THREADS() { return 1; }
#ifdef __cplusplus
}; // extern "C"
#endif
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()
#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
|
GB_binop__bshift_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bshift_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int16)
// C=scalar+B GB (_bind1st__bshift_int16)
// C=scalar+B' GB (_bind1st_tran__bshift_int16)
// C=A+scalar GB (_bind2nd__bshift_int16)
// C=A'+scalar GB (_bind2nd_tran__bshift_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_bitshift_int16 (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int16 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT16 || GxB_NO_BSHIFT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the computation itself is in
// the included template (this file is auto-generated from Generator/*).
void GB (_Cdense_ewise3_noaccum__bshift_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
GrB_Info GB (_Cdense_accumB__bshift_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time (see GB_DISABLE above)
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__bshift_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B.  For eWiseUnion, the
// alpha/beta scalars stand in for entries missing from A or B.
GrB_Info GB (_AaddB__bshift_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // typed views of the eWiseUnion "missing entry" scalars
    int16_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (with optional mask) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__bshift_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is set for bshift (non-commutative, no flipped variant),
// so flipxy is handled by switching GB_FLIPPED for the template.
GrB_Info GB (_AemultB_02__bshift_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_04__bshift_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (with optional mask) where C is bitmap.
GrB_Info GB (_AemultB_bitmap__bshift_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary op with the scalar x bound as the
// first argument, over all bnz entries of B (Bb is B's optional bitmap).
GrB_Info GB (_bind1st__bshift_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present per the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_int16 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary op with the scalar y bound as the
// second argument, over all anz entries of A (Ab is A's optional bitmap).
GrB_Info GB (_bind2nd__bshift_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_int16 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int16 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the op with x bound as the first
// argument.  GB_unop_transpose.c consumes the GB_CAST_OP macro defined
// just above this function.
GrB_Info GB (_bind1st_tran__bshift_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int16 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the op with y bound as the second
// argument (GB_CAST_OP defined just above supplies the kernel body).
GrB_Info GB (_bind2nd_tran__bshift_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
yolov2.h | #ifndef YOLOV3
#define YOLOV3
#include <stdio.h>
#include <stdlib.h>
//#include <iostream>
#include <math.h>
#include <fcntl.h>
#include <string.h>
#include <time.h>
#include <sys/time.h> /* gettimeofday(), used by what_time_is_it_now() */
#include "xconv_hw.h"
//#include "hw_drivers.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#define FLT_MAX 3.402823466e+38F /* max value */
// Current wall-clock time in seconds (microsecond resolution).
// Returns 0 if gettimeofday() fails.
// `static` because this function is *defined* in a header: without internal
// linkage, including this header from two translation units would produce
// duplicate-symbol link errors.  Requires <sys/time.h> for gettimeofday().
static double what_time_is_it_now(void)
{
    struct timeval tv;
    if (gettimeofday(&tv, NULL)) {
        return 0;
    }
    return (double)tv.tv_sec + (double)tv.tv_usec * .000001;
}
//#include "yolo_hls.h"
// Activation functions selectable per layer.
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
// Every layer kind the network loader can instantiate (darknet-style).
typedef enum {
CONVOLUTIONAL,
DECONVOLUTIONAL,
CONNECTED,
MAXPOOL,
SOFTMAX,
DETECTION,
DROPOUT,
CROP,
ROUTE,
COST,
NORMALIZATION,
AVGPOOL,
LOCAL,
SHORTCUT,
ACTIVE,
RNN,
GRU,
LSTM,
CRNN,
BATCHNORM,
NETWORK,
XNOR,
REGION,
YOLO,
REORG,
UPSAMPLE,
LOGXENT,
L2NORM,
BLANK
} LAYER_TYPE;
struct network;
typedef struct network network;
struct layer;
typedef struct layer layer;
// One network layer. This is darknet's "union of all layer kinds" struct:
// each layer type uses only a subset of the fields. Most pointer members
// are heap buffers owned by the layer (released in free_layer()).
struct layer{
LAYER_TYPE type;
ACTIVATION activation;
// forward pass for this layer type (set by the make_* constructors)
void (*forward) (struct layer, struct network);
int batch_normalize;
int shortcut;
int batch;
int forced;
int flipped;
int inputs;
int outputs;
int nweights;
int nbiases;
int extra;
int truths;
// input spatial dims / channels and output dims
int h,w,c;
int out_h, out_w, out_c;
int n;
int max_boxes;
int groups;
int size;
int side;
int stride;
int reverse;
int flatten;
int spatial;
int pad;
int sqrt;
int flip;
int index;
int binary;
int xnor;
int steps;
int hidden;
int truth;
float smooth;
float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
float ratio;
float learning_rate_scale;
float clip;
int softmax;
int classes;
int coords;
int background;
int rescore;
int objectness;
int joint;
int noadjust;
int reorg;
int log;
int tanh;
// YOLO: anchor mask and total anchor count
int *mask;
int total;
float alpha;
float beta;
float kappa;
float coord_scale;
float object_scale;
float noobject_scale;
float mask_scale;
float class_scale;
int bias_match;
int random;
float ignore_thresh;
float truth_thresh;
float thresh;
float focus;
int classfix;
int absolute;
int onlyforward;
int stopbackward;
// int dontload;
int dontsave;
// int dontloadscales;
float temperature;
float probability;
float scale;
// -- heap buffers (ownership: this layer; see free_layer) --
char * cweights;
int * indexes;
int * input_layers;
int * input_sizes;
int * map;
float * rand;
float * cost;
float * state;
float * prev_state;
float * forgot_state;
float * forgot_delta;
float * state_delta;
float * combine_cpu;
float * combine_delta_cpu;
float * concat;
float * concat_delta;
float * binary_weights;
float * biases;
float * bias_updates;
float * scales;
float * scale_updates;
float * weights;
float * weight_updates;
float * delta;
float * output;
float * loss;
float * squared;
float * norms;
float * spatial_mean;
float * mean;
float * variance;
float * mean_delta;
float * variance_delta;
float * rolling_mean;
float * rolling_variance;
float * x;
float * x_norm;
float * m;
float * v;
float * bias_m;
float * bias_v;
float * scale_m;
float * scale_v;
// -- RNN/GRU/LSTM scratch buffers (unused by the conv-only path here) --
float *z_cpu;
float *r_cpu;
float *h_cpu;
float * prev_state_cpu;
float *temp_cpu;
float *temp2_cpu;
float *temp3_cpu;
float *dh_cpu;
float *hh_cpu;
float *prev_cell_cpu;
float *cell_cpu;
float *f_cpu;
float *i_cpu;
float *g_cpu;
float *o_cpu;
float *c_cpu;
float *dc_cpu;
float * binary_input;
// -- sub-layers for recurrent layer kinds (not freed by free_layer) --
struct layer *input_layer;
struct layer *self_layer;
struct layer *output_layer;
struct layer *reset_layer;
struct layer *update_layer;
struct layer *state_layer;
struct layer *input_gate_layer;
struct layer *state_gate_layer;
struct layer *input_save_layer;
struct layer *state_save_layer;
struct layer *input_state_layer;
struct layer *state_state_layer;
struct layer *input_z_layer;
struct layer *state_z_layer;
struct layer *input_r_layer;
struct layer *state_r_layer;
struct layer *input_h_layer;
struct layer *state_h_layer;
struct layer *wz;
struct layer *uz;
struct layer *wr;
struct layer *ur;
struct layer *wh;
struct layer *uh;
struct layer *uo;
struct layer *wo;
struct layer *uf;
struct layer *wf;
struct layer *ui;
struct layer *wi;
struct layer *ug;
struct layer *wg;
//tree *softmax_tree;
// bytes of shared scratch (net.workspace) this layer needs
size_t workspace_size;
};
// Release every heap buffer this function has ever owned for a layer.
// free(NULL) is a no-op, so the original `if (p) free(p)` guards were
// redundant and have been removed. The set and order of freed members is
// unchanged. Sub-layer pointers (input_layer, wz, ...) are deliberately
// not freed here -- presumably their creator releases them (unchanged
// behavior; TODO confirm ownership).
void free_layer(layer l)
{
    free(l.cweights);
    free(l.indexes);
    free(l.input_layers);
    free(l.input_sizes);
    free(l.map);
    free(l.rand);
    free(l.cost);
    free(l.state);
    free(l.prev_state);
    free(l.forgot_state);
    free(l.forgot_delta);
    free(l.state_delta);
    free(l.concat);
    free(l.concat_delta);
    free(l.binary_weights);
    free(l.biases);
    free(l.bias_updates);
    free(l.scales);
    free(l.scale_updates);
    free(l.weights);
    free(l.weight_updates);
    free(l.delta);
    free(l.output);
    free(l.squared);
    free(l.norms);
    free(l.spatial_mean);
    free(l.mean);
    free(l.variance);
    free(l.mean_delta);
    free(l.variance_delta);
    free(l.rolling_mean);
    free(l.rolling_variance);
    free(l.x);
    free(l.x_norm);
    free(l.m);
    free(l.v);
    free(l.z_cpu);
    free(l.r_cpu);
    free(l.h_cpu);
    free(l.binary_input);
}
//void free_layer(layer);
// Learning-rate schedule kinds (training-only; unused in this inference path).
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;
// The whole network: layer array plus training hyper-parameters and the
// shared input/workspace buffers threaded through each forward() call.
typedef struct network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
layer *layers;
float *output;
learning_rate_policy policy;
float learning_rate;
float momentum;
float decay;
float gamma;
float scale;
float power;
int time_steps;
int step;
int max_batches;
float *scales;
int *steps;
int num_steps;
int burn_in;
int adam;
float B1;
float B2;
float eps;
int inputs;
int outputs;
int truths;
int notruth;
int h, w, c;
int max_crop;
int min_crop;
float max_ratio;
float min_ratio;
int center;
float angle;
float aspect;
float exposure;
float saturation;
float hue;
int random;
int gpu_index;
// tree *hierarchy;
// current input to the layer being executed (re-pointed per layer)
float *input;
float *truth;
float *delta;
// scratch shared by all layers; sized to the max layer workspace_size
float *workspace;
int train;
int index;
float *cost;
float clip;
} network;
network *make_network(int n);
layer get_network_output_layer(network *net);
// Parameters for random image augmentation (training-only here).
typedef struct {
int w;
int h;
float scale;
float rad;
float dx;
float dy;
float aspect;
} augment_args;
// CHW float image; data is h*w*c floats in [0,1].
typedef struct {
int w;
int h;
int c;
float *data;
} image;
// Box center (x,y) plus width/height, in normalized coordinates.
typedef struct{
float x, y, w, h;
} box;
// One detection: box, per-class probabilities, objectness score.
typedef struct detection{
box bbox;
int classes;
float *prob;
float *mask;
float objectness;
int sort_class;
} detection;
typedef struct matrix{
int rows, cols;
float **vals;
} matrix;
// A dataset: input matrix X, label matrix y, optional boxes per row.
typedef struct{
int w, h;
matrix X;
matrix y;
int shallow;
int *num_boxes;
box **boxes;
} data;
typedef enum {
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
} data_type;
// Arguments handed to the (threaded) data loader.
typedef struct load_args{
int threads;
char **paths;
char *path;
int n;
int m;
char **labels;
int h;
int w;
int out_w;
int out_h;
int nh;
int nw;
int num_boxes;
int min, max, size;
int classes;
int background;
int scale;
int center;
int coords;
float jitter;
float angle;
float aspect;
float saturation;
float exposure;
float hue;
data *d;
image *im;
image *resized;
data_type type;
// tree *hierarchy;
} load_args;
// A ground-truth box as read from label files.
typedef struct{
int id;
float x,y,w,h;
float left, right, top, bottom;
} box_label;
//network *load_network(char *cfg, char *weights, int clear);
//load_args get_base_args(network *net);
//void free_data(data d);
// key=value pair from a config file; `used` tracks option_find() hits.
typedef struct{
char *key;
char *val;
int used;
} kvp;
// Doubly-linked list node.
typedef struct node{
void *val;
struct node *next;
struct node *prev;
} node;
typedef struct list{
int size;
node *front;
node *back;
} list;
// Print a system error message for s and terminate. assert(0) fires in
// debug builds; exit(-1) covers NDEBUG builds where the assert compiles away.
void error(const char *s)
{
perror(s);
assert(0);
exit(-1);
}
// Report an allocation failure and terminate.
void malloc_error()
{
fprintf(stderr, "Malloc error\n");
exit(-1);
}
// Report a file that could not be opened and terminate.
// Fix: exit with a failure status -- the original called exit(0), which
// reports success to the parent process despite the fatal error, and is
// inconsistent with error()/malloc_error() above.
void file_error(char *s)
{
    fprintf(stderr, "Couldn't open file: %s\n", s);
    exit(-1);
}
/////////////////list begin
list *make_list()
{
list *l = (list *)malloc(sizeof(list));
l->size = 0;
l->front = 0;
l->back = 0;
return l;
}
// Remove and return the value stored at the tail, or NULL on an empty list.
void *list_pop(list *l){
    node *tail = l->back;
    if(tail == NULL) return NULL;
    void *val = tail->val;
    l->back = tail->prev;
    if(l->back != NULL) l->back->next = NULL;
    free(tail);
    l->size -= 1;
    return val;
}
// Append val at the tail of the list.
void list_insert(list *l, void *val)
{
    node *tail = (node *)malloc(sizeof(node));
    tail->val = val;
    tail->next = NULL;
    tail->prev = l->back;
    if(l->back == NULL){
        l->front = tail;
    }else{
        l->back->next = tail;
    }
    l->back = tail;
    l->size += 1;
}
// Free a chain of nodes starting at n; stored values are untouched.
void free_node(node *n)
{
    while(n != NULL){
        node *rest = n->next;
        free(n);
        n = rest;
    }
}
// Free the list skeleton (all nodes plus the header); values survive.
void free_list(list *l)
{
    free_node(l->front);
    free(l);
}
// Free every stored value; the nodes and header remain valid.
void free_list_contents(list *l)
{
    for(node *n = l->front; n != NULL; n = n->next){
        free(n->val);
    }
}
// Flatten the list's values into a freshly-allocated array of l->size ptrs.
void **list_to_array(list *l)
{
    void **arr = (void **)calloc(l->size, sizeof(void *));
    int idx = 0;
    for(node *n = l->front; n != NULL; n = n->next){
        arr[idx] = n->val;
        ++idx;
    }
    return arr;
}
/////////////////list end
/////////////////////utils begin
// Remove argv[index], shifting later arguments left; the vacated last
// slot becomes NULL.
void del_arg(int argc, char **argv, int index)
{
    int i = index;
    while(i < argc-1){
        argv[i] = argv[i+1];
        ++i;
    }
    argv[i] = 0;
}
// Return 1 (and remove the flag from argv) if arg is present, else 0.
int find_arg(int argc, char* argv[], char *arg)
{
    for(int i = 0; i < argc; ++i) {
        if(!argv[i]) continue;
        if(strcmp(argv[i], arg) == 0) {
            del_arg(argc, argv, i);
            return 1;
        }
    }
    return 0;
}
// Return the integer following flag `arg` (consuming both tokens from
// argv), or def if the flag is absent.
int find_int_arg(int argc, char **argv, char *arg, int def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(!argv[i]) continue;
        if(strcmp(argv[i], arg) == 0){
            def = atoi(argv[i+1]);
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}
// Same as find_int_arg but parses a float.
float find_float_arg(int argc, char **argv, char *arg, float def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(!argv[i]) continue;
        if(strcmp(argv[i], arg) == 0){
            def = atof(argv[i+1]);
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}
// Same as find_int_arg but returns the following token verbatim.
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(!argv[i]) continue;
        if(strcmp(argv[i], arg) == 0){
            def = argv[i+1];
            del_arg(argc, argv, i);
            del_arg(argc, argv, i);
            break;
        }
    }
    return def;
}
// Read an entire file into a NUL-terminated heap buffer (caller frees).
// Fixes vs. original: a failed fopen no longer leads to fseek(NULL,...)
// (undefined behavior), ftell and fread results are checked, and the
// buffer is terminated at the number of bytes actually read.
unsigned char *read_file(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    if(!fp){
        fprintf(stderr, "Couldn't open file: %s\n", filename);
        exit(-1);
    }
    fseek(fp, 0, SEEK_END);
    long size = ftell(fp);
    if(size < 0) size = 0;
    fseek(fp, 0, SEEK_SET);
    unsigned char *text = (unsigned char *)calloc(size+1, sizeof(unsigned char));
    if(!text){
        fclose(fp);
        return 0;
    }
    size_t got = fread(text, 1, (size_t)size, fp);
    text[got] = '\0';   // terminate at what was actually read
    fclose(fp);
    return text;
}
// Split s in place on delim (each delim byte becomes NUL); returns a list
// of pointers into s. s must outlive the returned list.
list *split_str(char *s, char delim)
{
    list *parts = make_list();
    size_t len = strlen(s);
    list_insert(parts, s);
    for(size_t i = 0; i < len; ++i){
        if(s[i] == delim){
            s[i] = '\0';
            list_insert(parts, s + i + 1);
        }
    }
    return parts;
}
// Remove every space, tab, and newline from s, compacting in place.
void strip(char *s)
{
    char *dst = s;
    for(char *src = s; *src; ++src){
        char c = *src;
        if(c != ' ' && c != '\t' && c != '\n'){
            *dst++ = c;
        }
    }
    *dst = '\0';
}
// Remove every occurrence of `bad` from s, compacting in place.
void strip_char(char *s, char bad)
{
    char *dst = s;
    for(char *src = s; *src; ++src){
        if(*src != bad){
            *dst++ = *src;
        }
    }
    *dst = '\0';
}
// Free n pointers stored in ptrs, then the array itself.
void free_ptrs(void **ptrs, int n)
{
    for(int i = 0; i < n; ++i){
        free(ptrs[i]);
    }
    free(ptrs);
}
// Read one line of arbitrary length from fp into a growing heap buffer;
// the trailing newline is stripped. Returns NULL at EOF; caller frees.
// Fixes vs. original: size_t is printed with %zu (was %ld -- UB on ABIs
// where long != size_t), the initial malloc and inner fgets results are
// checked, realloc goes through a temporary, and line[curr-1] is no
// longer read when curr == 0 (possible if a NUL byte is read).
char *fgetl(FILE *fp)
{
    if(feof(fp)) return 0;
    size_t size = 512;
    char *line = (char *)malloc(size*sizeof(char));
    if(!line){
        malloc_error();
    }
    if(!fgets(line, size, fp)){
        free(line);
        return 0;
    }
    size_t curr = strlen(line);
    while(curr > 0 && (line[curr-1] != '\n') && !feof(fp)){
        if(curr == size-1){
            size *= 2;
            char *bigger = (char *)realloc(line, size*sizeof(char));
            if(!bigger) {
                free(line);
                printf("%zu\n", size);
                malloc_error();
            }
            line = bigger;
        }
        size_t readsize = size-curr;
        if(readsize > INT_MAX) readsize = INT_MAX-1;
        if(!fgets(&line[curr], readsize, fp)) break;
        curr = strlen(line);
    }
    if(curr > 0 && line[curr-1] == '\n') line[curr-1] = '\0';
    return line;
}
/////////////////////utils end
////////////////////option_list begin
// Wrap key/val in a kvp record (unused flag cleared) and append it.
void option_insert(list *l, char *key, char *val)
{
    kvp *pair = (kvp *)malloc(sizeof(kvp));
    pair->key = key;
    pair->val = val;
    pair->used = 0;
    list_insert(l, pair);
}
// Parse "key=value" in place: the first '=' becomes NUL and the pair is
// appended to options. Returns 1 on success, 0 on a malformed line.
// Fix: a line containing no '=' previously fell through and inserted the
// key with a NULL value, which option_unused()/option_find_* later pass
// to printf %s -- undefined behavior. Such lines (and empty values, as
// before) are now rejected.
int read_option(char *s, list *options)
{
    char *val = 0;
    size_t len = strlen(s);
    for(size_t i = 0; i < len; ++i){
        if(s[i] == '='){
            s[i] = '\0';
            val = s+i+1;
            break;
        }
    }
    if(!val || *val == '\0') return 0;
    option_insert(options, s, val);
    return 1;
}
// Warn about config keys that were never queried via option_find().
void option_unused(list *l)
{
node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(!p->used){
fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val);
}
n = n->next;
}
}
// Linear search for key; marks the entry used. Returns NULL if absent.
char *option_find(list *l, char *key)
{
node *n = l->front;
while(n){
kvp *p = (kvp *)n->val;
if(strcmp(p->key, key) == 0){
p->used = 1;
return p->val;
}
n = n->next;
}
return 0;
}
// String lookup with default; logs when the default is used.
char *option_find_str(list *l, char *key, char *def)
{
char *v = option_find(l, key);
if(v) return v;
if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
return def;
}
// Integer lookup with default; logs when the default is used.
int option_find_int(list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
fprintf(stderr, "%s: Using default '%d'\n", key, def);
return def;
}
// Integer lookup with default; silent.
int option_find_int_quiet(list *l, char *key, int def)
{
char *v = option_find(l, key);
if(v) return atoi(v);
return def;
}
// Float lookup with default; silent.
float option_find_float_quiet(list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
return def;
}
// Float lookup with default; logs when the default is used.
float option_find_float(list *l, char *key, float def)
{
char *v = option_find(l, key);
if(v) return atof(v);
fprintf(stderr, "%s: Using default '%lf'\n", key, def);
return def;
}
// Parse a config file into a list of kvp options. Blank lines and comment
// lines (starting with '#' or ';') are skipped; malformed lines are
// reported and dropped.
list *read_data_cfg(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(file == 0) file_error(filename);
    list *options = make_list();
    int nu = 0;
    char *line;
    while((line=fgetl(file)) != 0){
        ++nu;
        strip(line);
        char first = line[0];
        if(first == '\0' || first == '#' || first == ';'){
            free(line);
        }else if(!read_option(line, options)){
            fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
            free(line);
        }
    }
    fclose(file);
    return options;
}
///////////////////option_list end
image make_empty_image(int w, int h, int c)
{
image out;
out.data = 0;
out.h = h;
out.w = w;
out.c = c;
return out;
}
// Read every line of filename into a list of heap-allocated strings.
list *get_paths(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    char *path;
    while((path = fgetl(file)) != 0){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}
// Load class-label strings (one per line). The list skeleton is freed;
// the strings themselves and the returned array are owned by the caller.
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}
// Allocate a zero-initialized w x h x c image.
image make_image(int w, int h, int c)
{
    image out = make_empty_image(w, h, c);
    out.data = (float *)calloc(h*w*c, sizeof(float));
    return out;
}
// Fetch pixel (x,y) of channel c from a CHW image; asserts bounds.
static float get_pixel(image m, int x, int y, int c)
{
assert(x < m.w && y < m.h && c < m.c);
return m.data[c*m.h*m.w + y*m.w + x];
}
// Store val at (x,y,c); silently ignores out-of-range coordinates.
// (The assert after the guard can never fire -- kept from the original.)
static void set_pixel(image m, int x, int y, int c, float val)
{
if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
assert(x < m.w && y < m.h && c < m.c);
m.data[c*m.h*m.w + y*m.w + x] = val;
}
// Accumulate val into (x,y,c); asserts bounds (no negative-index guard).
static void add_pixel(image m, int x, int y, int c, float val)
{
assert(x < m.w && y < m.h && c < m.c);
m.data[c*m.h*m.w + y*m.w + x] += val;
}
// Release an image's pixel buffer. free(NULL) is a no-op, so the
// original NULL guard was redundant and has been dropped.
void free_image(image m)
{
    free(m.data);
}
// Bilinear resize: first interpolate rows to the target width (into
// `part`), then interpolate columns to the target height. Kept verbatim:
// the exact float expression order matters for bit-identical outputs.
// NOTE(review): when w == 1 or h == 1 the scale divisions are x/0 -- this
// yields inf (not UB for floats) and the guarded branches below avoid
// ever using the value; confirm callers never rely on it.
image resize_image(image im, int w, int h)
{
image resized = make_image(w, h, im.c);
image part = make_image(w, im.h, im.c);
int r, c, k;
float w_scale = (float)(im.w - 1) / (w - 1);
float h_scale = (float)(im.h - 1) / (h - 1);
// pass 1: horizontal interpolation at the original height
for(k = 0; k < im.c; ++k){
for(r = 0; r < im.h; ++r){
for(c = 0; c < w; ++c){
float val = 0;
if(c == w-1 || im.w == 1){
val = get_pixel(im, im.w-1, r, k);
} else {
float sx = c*w_scale;
int ix = (int) sx;
float dx = sx - ix;
val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
}
set_pixel(part, c, r, k, val);
}
}
}
// pass 2: vertical interpolation of the half-resized image
for(k = 0; k < im.c; ++k){
for(r = 0; r < h; ++r){
float sy = r*h_scale;
int iy = (int) sy;
float dy = sy - iy;
for(c = 0; c < w; ++c){
float val = (1-dy) * get_pixel(part, c, iy, k);
set_pixel(resized, c, r, k, val);
}
if(r == h-1 || im.h == 1) continue;
for(c = 0; c < w; ++c){
float val = dy * get_pixel(part, c, iy+1, k);
add_pixel(resized, c, r, k, val);
}
}
}
free_image(part);
return resized;
}
// Set every element of the image to s.
void fill_image(image m, float s)
{
    int total = m.h*m.w*m.c;
    for(int i = 0; i < total; ++i){
        m.data[i] = s;
    }
}
// Copy `source` into `dest` with its top-left corner at (dx, dy);
// out-of-range destination pixels are silently dropped by set_pixel.
void embed_image(image source, image dest, int dx, int dy)
{
    for(int k = 0; k < source.c; ++k){
        for(int y = 0; y < source.h; ++y){
            for(int x = 0; x < source.w; ++x){
                set_pixel(dest, dx+x, dy+y, k, get_pixel(source, x, y, k));
            }
        }
    }
}
// Resize im to fit inside w x h while preserving aspect ratio, centered
// on a gray (0.5) canvas -- the standard YOLO input transform.
image letterbox_image(image im, int w, int h)
{
    int new_w, new_h;
    if (((float)w/im.w) < ((float)h/im.h)) {
        // width is the limiting dimension
        new_w = w;
        new_h = (im.h * w)/im.w;
    } else {
        new_h = h;
        new_w = (im.w * h)/im.h;
    }
    image resized = resize_image(im, new_w, new_h);
    image boxed = make_image(w, h, im.c);
    fill_image(boxed, .5);
    embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
    free_image(resized);
    return boxed;
}
// Load an image via stb_image, converting interleaved HWC uint8 data to
// planar CHW floats in [0,1]. `channels` == 0 keeps the file's channels.
image load_image_stb(char *filename, int channels)
{
int w, h, c;
unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
if (!data) {
fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
exit(0);
}
if(channels) c = channels;
int i,j,k;
image im = make_image(w, h, c);
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
// interleaved source index -> planar destination index
int dst_index = i + w*j + w*h*k;
int src_index = k + c*i + c*w*j;
im.data[dst_index] = (float)data[src_index]/255.;
}
}
}
free(data);
return im;
}
// Write im to "<name>.png" via stb_image_write, converting planar CHW
// floats (assumed in [0,1]) to interleaved 8-bit samples.
// Fix: snprintf bounds the filename -- the original sprintf could
// overflow the 256-byte buffer for long names.
void save_image_png(image im, const char *name)
{
    char buff[256];
    snprintf(buff, sizeof(buff), "%s.png", name);
    unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char));
    int i,k;
    for(k = 0; k < im.c; ++k){
        for(i = 0; i < im.w*im.h; ++i){
            data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
        }
    }
    int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
    free(data);
    if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
// Load the 8-size glyph atlas from labels/<ascii>_<size>.png (printable
// ASCII 32..126) for drawing detection labels.
// Fix: the outer table holds image* pointers, so it is allocated with
// sizeof(image *); the original used sizeof(image), which over-allocated
// (harmless but wrong -- the classic calloc element-size bug).
image **load_alphabet()
{
    int i, j;
    const int nsize = 8;
    image **alphabets = (image **)calloc(nsize, sizeof(image *));
    for(j = 0; j < nsize; ++j){
        alphabets[j] = (image *)calloc(128, sizeof(image));
        for(i = 32; i < 127; ++i){
            char buff[256];
            sprintf(buff, "labels/%d_%d.png", i, j);
            alphabets[j][i] = load_image_stb(buff, 3);
        }
    }
    return alphabets;
}
///////////////////activation begin
// Scalar activations and their derivatives. Kept byte-identical: several
// use arithmetic tricks (e.g. x*(x>0)) whose NaN/branch behavior differs
// subtly from the ternary form, and gradients are expressed in terms of
// the activation OUTPUT, not its input (e.g. logistic_gradient(y)=(1-y)y).
static inline float stair_activate(float x)
{
int n = floor(x);
if (n%2 == 0) return floor(x/2.);
else return (x - n) + floor(x/2.);
}
static inline float hardtan_activate(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
static inline float linear_activate(float x){return x;}
static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
static inline float relu_activate(float x){return x*(x>0);}
static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
// piecewise-linear sigmoid approximation
static inline float plse_activate(float x)
{
if(x < -4) return .01 * (x + 4);
if(x > 4) return .01 * (x - 4) + 1;
return .125*x + .5;
}
// "leaky hardtan": tiny slope outside [0,1]
static inline float lhtan_activate(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
static inline float lhtan_gradient(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
static inline float hardtan_gradient(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
static inline float linear_gradient(float x){return 1;}
static inline float logistic_gradient(float x){return (1-x)*x;}
static inline float loggy_gradient(float x)
{
float y = (x+1.)/2.;
return 2*(1-y)*y;
}
static inline float stair_gradient(float x)
{
if (floor(x) == x) return 0;
return 1;
}
static inline float relu_gradient(float x){return (x>0);}
static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
static inline float ramp_gradient(float x){return (x>0)+.1;}
static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
static inline float tanh_gradient(float x){return 1-x*x;}
static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}
// Map an ACTIVATION enum to its config-file name; unknown values -> "relu".
char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case LOGISTIC: return "logistic";
        case LOGGY:    return "loggy";
        case RELU:     return "relu";
        case ELU:      return "elu";
        case RELIE:    return "relie";
        case RAMP:     return "ramp";
        case LINEAR:   return "linear";
        case TANH:     return "tanh";
        case PLSE:     return "plse";
        case LEAKY:    return "leaky";
        case STAIR:    return "stair";
        case HARDTAN:  return "hardtan";
        case LHTAN:    return "lhtan";
        default:       return "relu";
    }
}
// Parse a config-file activation name; unknown names warn and map to RELU.
ACTIVATION get_activation(char *s)
{
    static const struct { const char *name; ACTIVATION act; } table[] = {
        {"logistic", LOGISTIC}, {"loggy", LOGGY}, {"relu", RELU},
        {"elu", ELU}, {"relie", RELIE}, {"plse", PLSE},
        {"hardtan", HARDTAN}, {"lhtan", LHTAN}, {"linear", LINEAR},
        {"ramp", RAMP}, {"leaky", LEAKY}, {"tanh", TANH}, {"stair", STAIR},
    };
    for(size_t i = 0; i < sizeof(table)/sizeof(table[0]); ++i){
        if(strcmp(s, table[i].name) == 0) return table[i].act;
    }
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}
// Dispatch a scalar x through the activation selected by a.
// Falls through to 0 only for enum values missing from the switch (none).
float activate(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate(x);
case LOGISTIC:
return logistic_activate(x);
case LOGGY:
return loggy_activate(x);
case RELU:
return relu_activate(x);
case ELU:
return elu_activate(x);
case RELIE:
return relie_activate(x);
case RAMP:
return ramp_activate(x);
case LEAKY:
return leaky_activate(x);
case TANH:
return tanh_activate(x);
case PLSE:
return plse_activate(x);
case STAIR:
return stair_activate(x);
case HARDTAN:
return hardtan_activate(x);
case LHTAN:
return lhtan_activate(x);
}
return 0;
}
// Apply activation a to all n elements of x in place.
void activate_array(float *x, const int n, const ACTIVATION a)
{
int i;
for(i = 0; i < n; ++i){
x[i] = activate(x[i], a);
}
}
// Derivative of activation a, evaluated at the activation OUTPUT x
// (e.g. logistic_gradient expects y = sigmoid(input)).
float gradient(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_gradient(x);
case LOGISTIC:
return logistic_gradient(x);
case LOGGY:
return loggy_gradient(x);
case RELU:
return relu_gradient(x);
case ELU:
return elu_gradient(x);
case RELIE:
return relie_gradient(x);
case RAMP:
return ramp_gradient(x);
case LEAKY:
return leaky_gradient(x);
case TANH:
return tanh_gradient(x);
case PLSE:
return plse_gradient(x);
case STAIR:
return stair_gradient(x);
case HARDTAN:
return hardtan_gradient(x);
case LHTAN:
return lhtan_gradient(x);
}
return 0;
}
///////////////////activation end
// Strided copy: Y[i*INCY] = X[i*INCX] for i in [0, N).
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    for(int i = 0; i < N; ++i){
        Y[i*INCY] = X[i*INCX];
    }
}
// Strided fill: X[i*INCX] = ALPHA for i in [0, N).
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    for(int i = 0; i < N; ++i){
        X[i*INCX] = ALPHA;
    }
}
// out = s1*out + s2*add over the overlapping region of two feature maps.
// `stride` down-samples add when it is larger, `sample` up-samples it when
// it is smaller; the asserts require symmetric w/h ratios.
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;
    for(int b = 0; b < batch; ++b){
        for(int k = 0; k < minc; ++k){
            for(int j = 0; j < minh; ++j){
                for(int i = 0; i < minw; ++i){
                    int dst = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int src = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[dst] = s1*out[dst] + s2*add[src];
                }
            }
        }
    }
}
// Residual add: l.output = net.input + output of layer l.index, elementwise
// over a single batch of l.w*l.h*l.c values.
// NOTE(review): unlike upstream darknet (commented out above the loop),
// this version skips the s1/s2 scaling, the batch loop, and the
// activation -- presumably matched to the HW pipeline; confirm.
void forward_shortcut_layer(const layer l, network net)
{
//copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
//shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
//activate_array(l.output, l.outputs*l.batch, l.activation);
int w = l.w;
int h = l.h;
int c = l.c;
float *add = net.layers[l.index].output;
float *out = l.output;
float *in = net.input;
int i,j,k;
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int index = i + w*(j + h*k );
out[index] = in[index] + add[index];
}
}
}
}
// Build a shortcut (residual add) layer that adds the output of layer
// `index` (w2 x h2 x c2) into the current w x h x c tensor.
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = SHORTCUT;
    l.batch = batch;
    l.w = w2;
    l.h = h2;
    l.c = c2;
    l.out_w = w;
    l.out_h = h;
    l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;
    l.index = index;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));
    l.forward = forward_shortcut_layer;
    return l;
}
// Output height after convolution: (h + 2*pad - k)/stride + 1.
int convolutional_out_height(layer l)
{
    int span = l.h + 2*l.pad - l.size;
    return span / l.stride + 1;
}
// Output width after convolution: (w + 2*pad - k)/stride + 1.
int convolutional_out_width(layer l)
{
    int span = l.w + 2*l.pad - l.size;
    return span / l.stride + 1;
}
// Bytes of im2col scratch this layer needs: one output position per
// column, ksize*ksize*c/groups rows. Assumes l.groups >= 1 (set by the
// constructor).
static size_t get_workspace_size(layer l){
return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}
// Add biases[i] to each of the `size` spatial elements of channel i,
// for every image in the batch.
void add_bias(float *output, float *biases, int batch, int n, int size)
{
    for(int b = 0; b < batch; ++b){
        for(int i = 0; i < n; ++i){
            float bias = biases[i];
            float *chan = output + (b*n + i)*size;
            for(int j = 0; j < size; ++j){
                chan[j] += bias;
            }
        }
    }
}
// Multiply each of the `size` spatial elements of channel i by scales[i],
// for every image in the batch (batchnorm gamma).
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
    for(int b = 0; b < batch; ++b){
        for(int i = 0; i < n; ++i){
            float s = scales[i];
            float *chan = output + (b*n + i)*size;
            for(int j = 0; j < size; ++j){
                chan[j] *= s;
            }
        }
    }
}
// Fetch im[channel][row-pad][col-pad], or 0 when the padded coordinate
// falls outside the image (zero padding).
float im2col_get_pixel(float *im, int height, int width, int channels,
        int row, int col, int channel, int pad)
{
    int r = row - pad;
    int c = col - pad;
    if (r < 0 || r >= height) return 0;
    if (c < 0 || c >= width) return 0;
    return im[c + width*(r + height*channel)];
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Unroll convolution windows into columns: data_col gets one row per
// (channel, ky, kx) kernel tap and one column per output position.
void im2col_cpu(float* data_im,
        int channels, int height, int width,
        int ksize, int stride, int pad, float* data_col)
{
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col = (width + 2*pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;
    for (int c = 0; c < channels_col; ++c) {
        // decompose the row index into (input channel, kernel y, kernel x)
        int w_offset = c % ksize;
        int h_offset = (c / ksize) % ksize;
        int c_im = c / ksize / ksize;
        for (int h = 0; h < height_col; ++h) {
            for (int w = 0; w < width_col; ++w) {
                int im_row = h_offset + h * stride;
                int im_col = w_offset + w * stride;
                int col_index = (c * height_col + h) * width_col + w;
                data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
            }
        }
    }
}
// C += ALPHA * A * B (row-major, neither operand transposed).
// Fix: the original declared j and k outside the `#pragma omp parallel
// for`, making them SHARED across threads -- a data race that corrupts
// results whenever OpenMP is enabled. Loop variables are now declared in
// the smallest scope (private per thread). The obsolete `register`
// qualifier was also dropped.
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(int k = 0; k < K; ++k){
            float A_PART = ALPHA*A[i*lda+k];
            for(int j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
// C = ALPHA*op(A)*op(B) + BETA*C. Only the no-transpose path (gemm_nn)
// is implemented in this trimmed build; the transposed variants are
// commented out and calls with TA/TB set silently do only the BETA scale.
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
// scale the accumulator first: C *= BETA
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
if(!TA && !TB)
gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else if(TA && !TB)
//    gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else if(!TA && TB)
//    gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
//else
//    gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
// Thin wrapper kept for API compatibility with upstream darknet.
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int b, f, i;
for(b = 0; b < batch; ++b){
for(f = 0; f < filters; ++f){
for(i = 0; i < spatial; ++i){
int index = b*filters*spatial + f*spatial + i;
x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}
}
}
}
// Inference-time batchnorm for conv layers: normalize with the stored
// rolling statistics, then apply the learned scale (gamma) and bias (beta).
void forward_batchnorm_layer(layer l, network net)//for conv
{
normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}
// Direct (naive) convolution with zero padding, single image.
// NOTE(review): despite the name, no ReLU is applied here -- activation
// appears to happen elsewhere in the pipeline.
void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding)
{
    // (output_w - 1)*Kernel_stride + Kernel_size = Input_w (+ padding)
    const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    for(int of = 0; of < OutFM_num; of++){
        for(int y = 0; y < output_h; y++) {
            for(int x = 0; x < output_w; x++){
                float acc = 0.0;
                for(int inf = 0; inf < InFM_num; inf++){
                    // top-left corner of this window in the input plane
                    int in_base = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding;
                    int k_base = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size;
                    for(int m = 0; m < Kernel_size; m++){
                        for(int n = 0; n < Kernel_size; n++){
                            int col = x*Kernel_stride + n - Padding;
                            int row = y*Kernel_stride + m - Padding;
                            bool in_w = (col >= 0) && (col < Input_w);
                            bool in_h = (row >= 0) && (row < Input_h);
                            if(in_w && in_h)
                                acc += Weight[k_base + m*Kernel_size + n]*Input[in_base + m*Input_w + n];
                        }
                    }
                }
                Output[of*output_w*output_h + y*output_w + x] = acc;
            }
        }
    }
}
// Convolution forward pass: im2col into the shared net.workspace, then a
// single GEMM (weights [n x k] * columns [k x out_w*out_h]), then
// batchnorm-or-bias and activation.
// NOTE(review): the grouped/batched path is commented out -- this version
// assumes batch == 1 and groups == 1; net.workspace must be at least
// l.workspace_size bytes (sized by the network builder, not shown here).
void forward_convolutional_layer(layer l, network net)
{
int i, j;
fill_cpu(l.outputs*l.batch, 0, l.output, 1);
//printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
//int m = l.n/l.groups;
//int k = l.size*l.size*l.c/l.groups;
//int n = l.out_w*l.out_h;
//for(i = 0; i < l.batch; ++i){
//    for(j = 0; j < l.groups; ++j){
//        float *a = l.weights + j*l.nweights/l.groups;
//        float *b = net.workspace;
//        float *c = l.output + (i*l.groups + j)*n*m;
//        im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
//            l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
//        gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
//    }
//}
int m = l.n;
int k = l.size*l.size*l.c;
int n = l.out_w*l.out_h;
float *a = l.weights;
float *b = net.workspace;
float *c = l.output;
im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b);
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
//CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
if(l.batch_normalize){
forward_batchnorm_layer(l, net);
} else {
add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
}
activate_array(l.output, l.outputs*l.batch, l.activation);
}
// Build a convolutional layer descriptor. Note that weights, biases,
// scales, rolling stats, and even l.output are NOT allocated here (the
// callocs are commented out) -- presumably these buffers are provided by
// the weight loader / hardware side; confirm before calling forward().
layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
int i;
layer l;
memset(&l,0,sizeof(layer));
l.type = CONVOLUTIONAL;
l.groups = groups;
l.h = h;
l.w = w;
l.c = c;
l.n = n;
l.binary = binary;
l.xnor = xnor;
l.batch = batch;
l.stride = stride;
l.size = size;
l.pad = padding;
l.batch_normalize = batch_normalize;
//    l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float));
//    l.biases = (float *)calloc(n, sizeof(float));
l.nweights = c/groups*n*size*size;
l.nbiases = n;
int out_w = convolutional_out_width(l);
int out_h = convolutional_out_height(l);
l.out_h = out_h;
l.out_w = out_w;
l.out_c = n;
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = l.w * l.h * l.c;
//    l.output = (float *)calloc(l.batch*l.outputs, sizeof(float));
l.forward = forward_convolutional_layer;
if(batch_normalize){
//        l.scales = (float *)calloc(n, sizeof(float));
//        l.rolling_mean = (float *)calloc(n, sizeof(float));
//l.rolling_variance = (float *)calloc(n, sizeof(float));
}
l.workspace_size = get_workspace_size(l);
l.activation = activation;
fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
return l;
}
// Nearest-neighbor up-sample (forward=1: out = scale*in repeated stride^2
// times) or the reverse accumulation (forward=0: in += scale*out).
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    for(int b = 0; b < batch; ++b){
        for(int k = 0; k < c; ++k){
            for(int j = 0; j < h*stride; ++j){
                for(int i = 0; i < w*stride; ++i){
                    int src = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
                    int dst = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if(forward) out[dst] = scale*in[src];
                    else in[src] += scale*out[dst];
                }
            }
        }
    }
}
// Single-batch nearest-neighbor upsample of net.input into l.output
// (no scale factor, unlike the general upsample_cpu above).
void forward_upsample_layer(const layer l, network net)
{
    float *in = net.input;
    float *out = l.output;
    int stride = l.stride;
    for(int k = 0; k < l.c; ++k){
        for(int j = 0; j < l.h*stride; ++j){
            for(int i = 0; i < l.w*stride; ++i){
                int src = k*l.w*l.h + (j/stride)*l.w + i/stride;
                int dst = k*l.w*l.h*stride*stride + j*l.w*stride + i;
                out[dst] = in[src];
            }
        }
    }
}
/* Create an upsample layer; a negative stride flips it into reverse
 * (downsample) mode by |stride|. */
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = UPSAMPLE;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    l.out_c = c;
    if(stride < 0){
        // negative stride: downsample instead of upsample
        stride = -stride;
        l.reverse = 1;
        l.out_w = w/stride;
        l.out_h = h/stride;
    }else{
        l.out_w = w*stride;
        l.out_h = h*stride;
    }
    l.stride = stride;
    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = w*h*c;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));
    l.forward = forward_upsample_layer;
    if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
/* Concatenate the outputs of the routed-in layers into l.output, in
 * the order given by l.input_layers. */
void forward_route_layer(const layer l, network net)
{
    int offset = 0;
    int idx;
    for(idx = 0; idx < l.n; ++idx){
        int src = l.input_layers[idx];
        int count = l.input_sizes[idx];
        copy_cpu(count, net.layers[src].output, 1, l.output + offset, 1);
        offset += count;
    }
}
/* Build a route (concatenation) layer.  The input_layers / input_sizes
 * arrays are stored, not copied — the layer takes ownership. */
layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
    fprintf(stderr,"route ");
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = ROUTE;
    l.batch = batch;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    int total = 0;
    int idx;
    for(idx = 0; idx < n; ++idx){
        fprintf(stderr," %d", input_layers[idx]);
        total += input_sizes[idx];
    }
    fprintf(stderr, "\n");
    l.outputs = total;
    l.inputs = total;
    l.forward = forward_route_layer;
    return l;
}
/* Map (batch, spatial location, entry) to a flat index in a YOLO/region
 * output laid out as n anchor blocks of (4+classes+1) channel planes of
 * w*h values each. */
static int entry_index(layer l, int batch, int location, int entry)
{
    int plane = l.w*l.h;
    int anchor = location / plane;   // which anchor block
    int cell = location % plane;     // offset within the plane
    return batch*l.outputs + (anchor*(4 + l.classes + 1) + entry)*plane + cell;
}
/* YOLO head forward pass: copy the raw predictions through, then apply
 * logistic activation to the x,y box offsets (first 2 channel planes of
 * each anchor block) and to the objectness + class scores.
 * Cleanup: removed the unused locals (i, j, t) and the dead,
 * commented-out file-dump debug code, plus the redundant trailing
 * `return;`. */
void forward_yolo_layer(const layer l, network net)
{
    int b, n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            // x,y offsets
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            // objectness + per-class scores
            index = entry_index(l, b, n*l.w*l.h, 4);
            activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
        }
    }
}
/* Construct a YOLO detection layer using n of `total` anchors over a
 * w x h grid.  If mask is NULL an identity mask 0..n-1 is allocated;
 * otherwise the layer takes ownership of the given mask array. */
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = YOLO;
    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + 4 + 1);
    l.out_w = w;
    l.out_h = h;
    l.out_c = l.c;
    l.classes = classes;
    l.biases = (float *)calloc(total*2, sizeof(float));
    int i;
    for(i = 0; i < total*2; ++i) l.biases[i] = .5;   // default anchor size
    if(mask){
        l.mask = mask;
    }else{
        l.mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i) l.mask[i] = i;
    }
    l.outputs = h*w*l.c;
    l.inputs = l.outputs;
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    l.forward = forward_yolo_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
/////////////////praser begin
// One "[name]" section of a darknet cfg file.
typedef struct{
    char *type;      // section header line, e.g. "[convolutional]" (owned by the section)
    list *options;   // key=value option pairs belonging to this section
}section;
// Parse a cfg file into a list of section*; defined later in this file.
list *read_cfg(char *filename);
/* Translate a cfg section header (e.g. "[conv]") to its LAYER_TYPE.
 * Unknown headers map to BLANK. */
LAYER_TYPE string_to_layer_type(char * type)
{
    static const struct { const char *name; LAYER_TYPE t; } table[] = {
        {"[shortcut]", SHORTCUT}, {"[crop]", CROP}, {"[cost]", COST},
        {"[detection]", DETECTION}, {"[region]", REGION}, {"[yolo]", YOLO},
        {"[local]", LOCAL},
        {"[conv]", CONVOLUTIONAL}, {"[convolutional]", CONVOLUTIONAL},
        {"[deconv]", DECONVOLUTIONAL}, {"[deconvolutional]", DECONVOLUTIONAL},
        {"[activation]", ACTIVE}, {"[logistic]", LOGXENT}, {"[l2norm]", L2NORM},
        {"[net]", NETWORK}, {"[network]", NETWORK},
        {"[crnn]", CRNN}, {"[gru]", GRU}, {"[lstm]", LSTM}, {"[rnn]", RNN},
        {"[conn]", CONNECTED}, {"[connected]", CONNECTED},
        {"[max]", MAXPOOL}, {"[maxpool]", MAXPOOL},
        {"[reorg]", REORG},
        {"[avg]", AVGPOOL}, {"[avgpool]", AVGPOOL},
        {"[dropout]", DROPOUT},
        {"[lrn]", NORMALIZATION}, {"[normalization]", NORMALIZATION},
        {"[batchnorm]", BATCHNORM},
        {"[soft]", SOFTMAX}, {"[softmax]", SOFTMAX},
        {"[route]", ROUTE}, {"[upsample]", UPSAMPLE},
    };
    size_t i;
    for(i = 0; i < sizeof(table)/sizeof(table[0]); ++i){
        if(strcmp(type, table[i].name) == 0) return table[i].t;
    }
    return BLANK;
}
// Free one cfg section: its header string, every node and kvp of its
// option list, the list itself, and finally the section struct.
void free_section(section *s)
{
    free(s->type);
    node *n = s->options->front;
    while(n){
        kvp *pair = (kvp *)n->val;
        // NOTE(review): pair->val is not freed here — presumably it
        // points into the same allocation as pair->key (the original
        // cfg line); confirm against read_option before changing.
        free(pair->key);
        free(pair);
        node *next = n->next;
        free(n);
        n = next;
    }
    free(s->options);
    free(s);
}
/* Parse up to n comma-separated floats from the writable string `data`
 * into a[0..n-1].  The string is modified in place (commas are replaced
 * by NULs).  Elements whose text cannot be scanned are left untouched.
 * Fixes: the original scanned with `*++next`, which (a) read one byte
 * past the terminator when data was an empty string (buffer over-read,
 * undefined behavior) and (b) failed to treat a comma at position 0 as
 * a field separator. */
void parse_data(char *data, float *a, int n)
{
    int i;
    if(!data) return;
    char *curr = data;
    int done = 0;
    for(i = 0; i < n && !done; ++i){
        // find the end of the current token without stepping past '\0'
        char *next = curr;
        while(*next != '\0' && *next != ',') ++next;
        if(*next == '\0') done = 1;       // last token in the string
        *next = '\0';
        sscanf(curr, "%g", &a[i]);        // a[i] unchanged on scan failure
        curr = next + 1;
    }
}
// Running geometry/context handed to each layer parser while walking
// the cfg sections in order.
typedef struct size_params{
    int batch;       // effective batch size for every layer
    int inputs;      // flat output count of the previous layer
    int h;           // input height for the layer being parsed
    int w;           // input width
    int c;           // input channels
    int index;       // index of the layer currently being parsed
    int time_steps;  // time steps (recurrent layers)
    network *net;    // network under construction
} size_params;
/* Parse a [convolutional] section into a layer.  "pad=1" overrides any
 * explicit "padding" with size/2 (same-padding). */
layer parse_convolutional(list *options, size_params params)
{
    int n = option_find_int(options, "filters",1);
    int size = option_find_int(options, "size",1);
    int stride = option_find_int(options, "stride",1);
    int pad = option_find_int_quiet(options, "pad",0);
    int padding = option_find_int_quiet(options, "padding",0);
    int groups = option_find_int_quiet(options, "groups", 1);
    if(pad) padding = size/2;
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);
    int h = params.h;
    int w = params.w;
    int c = params.c;
    int batch = params.batch;
    if(!(h && w && c)) error("Layer before convolutional layer must output image.");
    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
    int binary = option_find_int_quiet(options, "binary", 0);
    int xnor = option_find_int_quiet(options, "xnor", 0);
    layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
    l.flipped = option_find_int_quiet(options, "flipped", 0);
    l.dot = option_find_float_quiet(options, "dot", 0);
    return l;
}
/* Parse a comma-separated list of ints ("0,1,2") into a freshly
 * allocated array; the caller owns (and frees) the result.  On success
 * *num receives the element count.  Returns NULL and leaves *num
 * untouched when a is NULL.
 * Fix: the original unconditionally computed strchr(a, ',') + 1, which
 * is NULL + 1 after the last element — undefined behavior. */
int *parse_yolo_mask(char *a, int *num)
{
    int *mask = 0;
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            mask[i] = atoi(a);
            char *comma = strchr(a, ',');
            if(!comma) break;       // last element: nothing to advance past
            a = comma + 1;
        }
        *num = n;
    }
    return mask;
}
/* Parse a [yolo] section into a detection layer.
 * Fix: the anchors loop computed strchr(a, ',') + 1 after the last
 * value (NULL + 1, undefined behavior); it now stops at the last
 * comma. */
layer parse_yolo(list *options, size_params params)
{
    int classes = option_find_int(options, "classes", 20);
    int total = option_find_int(options, "num", 1);
    int num = total;
    char *a = option_find_str(options, "mask", 0);
    int *mask = parse_yolo_mask(a, &num);
    layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
    assert(l.outputs == params.inputs);
    l.max_boxes = option_find_int_quiet(options, "max",90);
    l.jitter = option_find_float(options, "jitter", .2);
    l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
    l.truth_thresh = option_find_float(options, "truth_thresh", 1);
    l.random = option_find_int_quiet(options, "random", 0);
    a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            l.biases[i] = atof(a);
            char *comma = strchr(a, ',');
            if(!comma) break;       // last anchor value
            a = comma + 1;
        }
    }
    return l;
}
/* Parse a [shortcut] (residual add) section; "from" may be negative,
 * meaning relative to the current layer index. */
layer parse_shortcut(list *options, size_params params, network *net)
{
    char *from_s = option_find(options, "from");
    int index = atoi(from_s);
    if(index < 0) index += params.index;
    layer from = net->layers[index];
    layer s = make_shortcut_layer(params.batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
    char *activation_s = option_find_str(options, "activation", "linear");
    s.activation = get_activation(activation_s);
    s.alpha = option_find_float_quiet(options, "alpha", 1);
    s.beta = option_find_float_quiet(options, "beta", 1);
    return s;
}
/* Parse an [upsample] section (default stride 2). */
layer parse_upsample(list *options, size_params params, network *net)
{
    int stride = option_find_int(options, "stride", 2);
    layer l = make_upsample_layer(params.batch, params.w, params.h,
                                  params.c, stride);
    l.scale = option_find_float_quiet(options, "scale", 1);
    return l;
}
/* Parse a [route] section: gather the listed layer indices (negative
 * values are relative to the current layer) and build a concatenation
 * layer over their outputs.
 * Fixes: the original called strlen(l) BEFORE the NULL check on l
 * (undefined behavior when "layers" is missing), and computed
 * strchr(l, ',') + 1 on the last element (NULL + 1, also UB). */
layer parse_route(list *options, size_params params, network *net)
{
    char *l = option_find(options, "layers");
    if(!l) error("Route Layer must specify input layers");
    int len = strlen(l);
    int n = 1;
    int i;
    for(i = 0; i < len; ++i){
        if (l[i] == ',') ++n;
    }
    int *layers = (int *)calloc(n, sizeof(int));
    int *sizes = (int *)calloc(n, sizeof(int));
    for(i = 0; i < n; ++i){
        int index = atoi(l);
        char *comma = strchr(l, ',');
        if(comma) l = comma + 1;        // advance only when another field exists
        if(index < 0) index = params.index + index;
        layers[i] = index;
        sizes[i] = net->layers[index].outputs;
    }
    int batch = params.batch;
    layer route_layer = make_route_layer(batch, n, layers, sizes);
    // output geometry: concatenate channels when spatial dims agree,
    // otherwise zero out the shape
    layer first = net->layers[layers[0]];
    route_layer.out_w = first.out_w;
    route_layer.out_h = first.out_h;
    route_layer.out_c = first.out_c;
    for(i = 1; i < n; ++i){
        layer next = net->layers[layers[i]];
        if(next.out_w == first.out_w && next.out_h == first.out_h){
            route_layer.out_c += next.out_c;
        }else{
            route_layer.out_h = route_layer.out_w = route_layer.out_c = 0;
        }
    }
    return route_layer;
}
void softmax(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
/* Apply softmax independently to every (batch, group) slice of the
 * input. */
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int b, g;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            int off = b*batch_offset + g*group_offset;
            softmax(input + off, n, temp, stride, output + off);
        }
    }
}
// Region (YOLOv2) head forward pass: copies the input through, applies
// logistic activation to the box x,y offsets and (unless l.background)
// the objectness score, then optionally a softmax over class scores.
void forward_region_layer(const layer l, network net)
{
    int i,j,b,t,n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            // x,y offsets of each predicted box
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            // objectness plane sits after the l.coords box values
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
            //if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax){
        // NOTE(review): the softmax reads net.input (pre-activation
        // values), not l.output — confirm this matches the intended
        // upstream behavior before changing.
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
    }
    // double time1,time2;
    // time1 = what_time_is_it_now();
    // char line[256];
    // FILE *fp3;
    // char filename[256];
    // sprintf(filename, "yolo_region_input_float32_%d.txt", 13*13*425);
    // printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
    // int x;
    // for( x = 0; x < l.outputs; x++)
    // {
    // sprintf(line, "%f\n", net.input[x]);
    // if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
    // }
    // fclose(fp3);
    // time2 = what_time_is_it_now();
    // printf("Predicted in %f seconds.\n",time2 - time1);
#endif
    if(!net.train) return;
}
/* Construct a region (YOLOv2) detection layer with n anchors over a
 * w x h grid, each predicting `coords` box values + objectness +
 * `classes` class scores. */
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = REGION;
    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = n*(classes + coords + 1);
    l.out_w = w;
    l.out_h = h;
    l.out_c = l.c;
    l.classes = classes;
    l.coords = coords;
    l.outputs = h*w*l.c;
    l.inputs = l.outputs;
    l.truths = 30*(coords + 1);
    l.biases = (float *)calloc(n*2, sizeof(float));
    int k;
    for(k = 0; k < n*2; ++k) l.biases[k] = .5;   // default anchor size
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    l.forward = forward_region_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
/* Parse a [region] (YOLOv2) section into a detection layer.
 * Fix: the anchors loop computed strchr(a, ',') + 1 after the last
 * value (NULL + 1, undefined behavior); it now stops at the last
 * comma. */
layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);
    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);
    l.log = option_find_int_quiet(options, "log", 0);
    l.sqrt = option_find_int_quiet(options, "sqrt", 0);
    l.softmax = option_find_int(options, "softmax", 0);
    l.background = option_find_int_quiet(options, "background", 0);
    l.max_boxes = option_find_int_quiet(options, "max",30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore",0);
    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);
    l.absolute = option_find_int_quiet(options, "absolute", 0);
    l.random = option_find_int_quiet(options, "random", 0);
    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.mask_scale = option_find_float(options, "mask_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    l.bias_match = option_find_int_quiet(options, "bias_match",0);
    // tree/map files are looked up (marking the options used) but the
    // softmax-tree machinery is disabled in this port
    char *tree_file = option_find_str(options, "tree", 0);
    // if (tree_file) l.softmax_tree = read_tree(tree_file);
    char *map_file = option_find_str(options, "map", 0);
    // if (map_file) l.map = read_map(map_file);
    char *a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            l.biases[i] = atof(a);
            char *comma = strchr(a, ',');
            if(!comma) break;       // last anchor value
            a = comma + 1;
        }
    }
    return l;
}
/* Darknet "reorg" rearrangement between a (c, h, w) volume and a
 * (c/(stride*stride), h*stride, w*stride) volume.
 * forward != 0: out[reorg_index] = x[linear_index];
 * forward == 0: the inverse mapping, written through the same indices. */
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int out_c = c/(stride*stride);
    int b, ch, row, col;
    for(b = 0; b < batch; ++b){
        for(ch = 0; ch < c; ++ch){
            int c2 = ch % out_c;        // destination channel
            int offset = ch / out_c;    // which stride x stride sub-cell
            for(row = 0; row < h; ++row){
                int h2 = row*stride + offset / stride;
                for(col = 0; col < w; ++col){
                    int in_index = col + w*(row + h*(ch + c*b));
                    int w2 = col*stride + offset % stride;
                    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else out[in_index] = x[out_index];
                }
            }
        }
    }
}
/* Reorg forward pass: rearrange net.input into l.output via reorg_cpu
 * (non-reverse path only — the flatten/extra/reverse variants from
 * upstream darknet were removed in this port along with the rest of
 * the training code).
 * Cleanup: removed the unused local `i` and the dead commented-out
 * branches. */
void forward_reorg_layer(const layer l, network net)
{
    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
}
/* Construct a reorg layer.  reverse != 0 expands spatially
 * (depth-to-space); otherwise it packs space into depth.  A non-zero
 * `extra` overrides the geometry with a flat output size.
 * Note: l.output is NOT allocated here in this port (the allocation was
 * stripped); the forward pass assumes it is provided elsewhere.
 * Cleanup: removed the unused local `output_size`. */
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.extra = extra;
    l.h = h;
    l.w = w;
    l.c = c;
    l.flatten = flatten;
    if(reverse){
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.reverse = reverse;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    if(l.extra){
        l.out_w = l.out_h = l.out_c = 0;
        l.outputs = l.inputs + l.extra;
    }
    if(extra){
        fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
    } else {
        fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    }
    l.forward = forward_reorg_layer;
    return l;
}
/* Parse a [reorg] section. */
layer parse_reorg(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int reverse = option_find_int_quiet(options, "reverse",0);
    int flatten = option_find_int_quiet(options, "flatten",0);
    int extra = option_find_int_quiet(options, "extra",0);
    int h = params.h;
    int w = params.w;
    int c = params.c;
    if(!(h && w && c)) error("Layer before reorg layer must output image.");
    // local renamed from "layer" to avoid shadowing the type name
    layer l = make_reorg_layer(params.batch, w, h, c, stride, reverse, flatten, extra);
    return l;
}
// Max-pooling forward pass: for each output cell, take the maximum over
// an l.size x l.size window (stride l.stride, padding l.pad) of the
// input, storing the value in l.output and the flat index of the
// winning input element in l.indexes.
// NOTE(review): l.output and l.indexes are not allocated by
// make_maxpool_layer in this port — confirm who owns those buffers.
void forward_maxpool_layer(layer l, network net)
{
    int b,i,j,k,m,n;
    int w_offset = -l.pad;   // window origin shifted left/up by the padding
    int h_offset = -l.pad;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int out_index = j + w*(i + h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for(n = 0; n < l.size; ++n){
                        for(m = 0; m < l.size; ++m){
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            // window positions outside the input count as -FLT_MAX
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                    cur_w >= 0 && cur_w < l.w);
                            float val = (valid != 0) ? net.input[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    l.output[out_index] = max;
                    l.indexes[out_index] = max_i;
                }
            }
        }
    }
}
/* Construct a maxpool layer.  Note: l.output and l.indexes are NOT
 * allocated here in this port; the forward pass assumes they are
 * provided elsewhere.
 * Cleanup: removed the unused local `output_size`. */
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    layer l;
    memset(&l, 0, sizeof(layer));
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.out_w = (w + padding - size)/stride + 1;
    l.out_h = (h + padding - size)/stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
/* Parse a [maxpool] section; defaults: size = stride, padding = size-1. */
layer parse_maxpool(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int size = option_find_int(options, "size",stride);
    int padding = option_find_int_quiet(options, "padding", size-1);
    if(!(params.h && params.w && params.c))
        error("Layer before maxpool layer must output image.");
    return make_maxpool_layer(params.batch, params.h, params.w, params.c,
                              size, stride, padding);
}
/* Map a learning-rate policy name from the cfg to its enum; unknown
 * names fall back to CONSTANT with a warning. */
learning_rate_policy get_policy(char *s)
{
    static const struct { const char *name; learning_rate_policy p; } table[] = {
        {"random", RANDOM}, {"poly", POLY}, {"constant", CONSTANT},
        {"step", STEP}, {"exp", EXP}, {"sigmoid", SIG}, {"steps", STEPS},
    };
    size_t i;
    for(i = 0; i < sizeof(table)/sizeof(table[0]); ++i){
        if(strcmp(s, table[i].name) == 0) return table[i].p;
    }
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}
/* Populate network hyper-parameters from the [net] section.
 * Fix: the STEPS parsing loop computed strchr(...) + 1 after the last
 * element (NULL + 1, undefined behavior); it now stops at the last
 * comma. */
void parse_net_options(list *options, network *net)
{
    net->batch = option_find_int(options, "batch",1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
    int subdivs = option_find_int(options, "subdivisions",1);
    net->time_steps = option_find_int_quiet(options, "time_steps",1);
    net->notruth = option_find_int_quiet(options, "notruth",0);
    // effective batch = cfg batch / subdivisions, scaled by time steps
    net->batch /= subdivs;
    net->batch *= net->time_steps;
    net->subdivisions = subdivs;
    net->random = option_find_int_quiet(options, "random", 0);
    net->adam = option_find_int_quiet(options, "adam", 0);
    if(net->adam){
        net->B1 = option_find_float(options, "B1", .9);
        net->B2 = option_find_float(options, "B2", .999);
        net->eps = option_find_float(options, "eps", .0000001);
    }
    net->h = option_find_int_quiet(options, "height",0);
    net->w = option_find_int_quiet(options, "width",0);
    net->c = option_find_int_quiet(options, "channels",0);
    net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
    net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
    net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
    // NOTE(review): these defaults divide by net->w — a cfg with only
    // "inputs" and no width would divide by zero here; confirm intended use.
    net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
    net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
    net->center = option_find_int_quiet(options, "center",0);
    net->clip = option_find_float_quiet(options, "clip", 0);
    net->angle = option_find_float_quiet(options, "angle", 0);
    net->aspect = option_find_float_quiet(options, "aspect", 1);
    net->saturation = option_find_float_quiet(options, "saturation", 1);
    net->exposure = option_find_float_quiet(options, "exposure", 1);
    net->hue = option_find_float_quiet(options, "hue", 0);
    if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
    char *policy_s = option_find_str(options, "policy", "constant");
    net->policy = get_policy(policy_s);
    net->burn_in = option_find_int_quiet(options, "burn_in", 0);
    net->power = option_find_float_quiet(options, "power", 4);
    if(net->policy == STEP){
        net->step = option_find_int(options, "step", 1);
        net->scale = option_find_float(options, "scale", 1);
    } else if (net->policy == STEPS){
        char *l = option_find(options, "steps");
        char *p = option_find(options, "scales");
        if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
        int len = strlen(l);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (l[i] == ',') ++n;
        }
        int *steps = (int *)calloc(n, sizeof(int));
        float *scales = (float *)calloc(n, sizeof(float));
        for(i = 0; i < n; ++i){
            steps[i] = atoi(l);
            scales[i] = atof(p);
            char *lc = strchr(l, ',');
            char *pc = strchr(p, ',');
            if(!lc || !pc) break;   // last step/scale pair reached
            l = lc + 1;
            p = pc + 1;
        }
        net->scales = scales;
        net->steps = steps;
        net->num_steps = n;
    } else if (net->policy == EXP){
        net->gamma = option_find_float(options, "gamma", 1);
    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    }
    // POLY and RANDOM need no extra options
    net->max_batches = option_find_int(options, "max_batches", 0);
}
/* True when the section is a [net]/[network] header block. */
int is_network(section *s)
{
    return strcmp(s->type, "[net]") == 0 || strcmp(s->type, "[network]") == 0;
}
/* Build a network from a darknet cfg file.  The first section must be
 * [net]/[network]; every following section becomes one layer whose
 * input geometry is the previous layer's output geometry.
 * Fix: removed the duplicate, unreachable `else if(lt == YOLO)` branch
 * (YOLO was already handled by an earlier branch of the same chain). */
network *parse_network_cfg(char *filename)
{
    list *sections = read_cfg(filename);
    node *n = sections->front;
    if(!n) error("Config file has no sections");
    network *net = make_network(sections->size - 1);
    net->gpu_index = -1;
    size_params params;
    section *s = (section *)n->val;
    list *options = s->options;
    if(!is_network(s)) error("First section must be [net] or [network]");
    parse_net_options(options, net);
    // seed the per-layer size bookkeeping from the [net] section
    params.h = net->h;
    params.w = net->w;
    params.c = net->c;
    params.inputs = net->inputs;
    params.batch = net->batch;
    params.time_steps = net->time_steps;
    params.net = net;
    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);
    fprintf(stderr, "layer filters size input output\n");
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        layer l;
        memset(&l, 0, sizeof(layer));
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == YOLO){
            l = parse_yolo(options, params);
        }else if(lt == ROUTE){
            l = parse_route(options, params, net);
        }else if(lt == UPSAMPLE){
            l = parse_upsample(options, params, net);
        }else if(lt == SHORTCUT){
            l = parse_shortcut(options, params, net);
        }else if(lt == REGION){
            l = parse_region(options, params);
        }else if(lt == MAXPOOL){
            l = parse_maxpool(options, params);
        }else if(lt == REORG){
            l = parse_reorg(options, params);
        }else{
            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }
        // options shared by all layer types
        l.clip = net->clip;
        l.truth = option_find_int_quiet(options, "truth", 0);
        l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
        l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
        l.dontsave = option_find_int_quiet(options, "dontsave", 0);
        l.smooth = option_find_float_quiet(options, "smooth", 0);
        option_unused(options);
        net->layers[count] = l;
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;
        if(n){
            // the next layer consumes this layer's output geometry
            params.h = l.out_h;
            params.w = l.out_w;
            params.c = l.out_c;
            params.inputs = l.outputs;
        }
    }
    free_list(sections);
    layer out = get_network_output_layer(net);
    net->outputs = out.outputs;
    net->output = out.output;
    workspace_size = 0; // workspace is not allocated in this inference-only port
    return net;
}
list *read_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
section *current = 0;
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '[':
current = (section *)malloc(sizeof(section));
list_insert(options, current);
current->options = make_list();
current->type = line;
break;
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, current->options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
/* Read biases, optional batch-norm statistics, and filter weights for
 * one convolutional layer from fp (darknet .weights layout, in that
 * order).
 * Fix: the fread return values were ignored, so a truncated weights
 * file silently left stale buffer contents; a short read now emits a
 * warning. */
void load_convolutional_weights(layer l, FILE *fp)
{
    int num = l.nweights;
    size_t got = 0;
    got += fread(l.biases, sizeof(float), l.n, fp);
    if (l.batch_normalize){
        got += fread(l.scales, sizeof(float), l.n, fp);
        got += fread(l.rolling_mean, sizeof(float), l.n, fp);
        got += fread(l.rolling_variance, sizeof(float), l.n, fp);
    }
    got += fread(l.weights, sizeof(float), num, fp);
    size_t expected = (size_t)l.n + (l.batch_normalize ? 3*(size_t)l.n : 0) + (size_t)num;
    if (got != expected){
        fprintf(stderr, "Warning: short read loading convolutional weights (%zu/%zu floats)\n", got, expected);
    }
}
// Load weights for layers [start, min(net->n, cutoff)) from a darknet
// .weights file.  Only convolutional layers are restored in this port.
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    // file header: format version triple, then the "images seen" counter
    int major;
    int minor;
    int revision;
    fread(&major, sizeof(int), 1, fp);
    fread(&minor, sizeof(int), 1, fp);
    fread(&revision, sizeof(int), 1, fp);
    printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
    printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
    if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
        //fread(net->seen, sizeof(size_t), 1, fp);
        // NOTE(review): `seen` is a 64-bit field in the file; reading
        // sizeof(size_t) twice looks like a workaround for 32-bit
        // size_t (see the sizeof comment below).  On a 64-bit build
        // this would consume 16 bytes instead of 8 — confirm target
        // platform before changing.
        fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
    }else {
        // old format stored seen as a 32-bit int
        int iseen = 0;
        fread(&iseen, sizeof(int), 1, fp);
        *net->seen = iseen;
    }
    //printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
    int i;
    for(i = start; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            load_convolutional_weights(l, fp);
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
// Load weights for every layer of the network (see load_weights_upto).
void load_weights(network *net, char *filename)
{
    load_weights_upto(net, filename, 0, net->n);
}
/////////////////praser end
/////////////////network begin
/* Collect the data-augmentation defaults stored on the network into a
 * load_args struct for the data loader. */
load_args get_base_args(network *net)
{
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    args.size = net->w;
    args.center = net->center;
    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.saturation = net->saturation;
    args.exposure = net->exposure;
    args.hue = net->hue;
    return args;
}
// Build a network from a cfg file; optionally reset the "images seen"
// counter.  NOTE(review): weight loading is commented out in this port,
// so the `weights` argument is currently ignored and the returned
// network keeps whatever values the layer constructors set.
network *load_network(char *cfg, char *weights, int clear)
{
    network *net = parse_network_cfg(cfg);
    //if(weights && weights[0] != 0){
    //    load_weights(net, weights);
    //}
    if(clear) (*net->seen) = 0;   // restart the training counter
    return net;
}
/* Human-readable name for a layer type ("none" when unknown). */
char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:   return "convolutional";
        case ACTIVE:          return "activation";
        case LOCAL:           return "local";
        case DECONVOLUTIONAL: return "deconvolutional";
        case CONNECTED:       return "connected";
        case RNN:             return "rnn";
        case GRU:             return "gru";
        case LSTM:            return "lstm";
        case CRNN:            return "crnn";
        case MAXPOOL:         return "maxpool";
        case REORG:           return "reorg";
        case AVGPOOL:         return "avgpool";
        case SOFTMAX:         return "softmax";
        case DETECTION:       return "detection";
        case REGION:          return "region";
        case YOLO:            return "yolo";
        case DROPOUT:         return "dropout";
        case CROP:            return "crop";
        case COST:            return "cost";
        case ROUTE:           return "route";
        case SHORTCUT:        return "shortcut";
        case NORMALIZATION:   return "normalization";
        case BATCHNORM:       return "batchnorm";
        default:              return "none";
    }
}
/* Allocate a zeroed network with room for n layers plus the shared
 * training counters. */
network *make_network(int n)
{
    network *net = (network *)calloc(1, sizeof(network));
    net->n = n;
    net->layers = (layer *)calloc(n, sizeof(layer));
    net->seen = (size_t *)calloc(1, sizeof(size_t));  // images seen so far
    net->t = (int *)calloc(1, sizeof(int));           // optimizer step counter
    net->cost = (float *)calloc(1, sizeof(float));
    return net;
}
/* Run every layer's forward pass in order, chaining each layer's
 * output into the next layer's input. */
void forward_network(network *netp)
{
    network net = *netp;   // by-value working copy, as upstream darknet does
    int idx;
    for(idx = 0; idx < net.n; ++idx){
        net.index = idx;
        layer cur = net.layers[idx];
        cur.forward(cur, net);
        net.input = cur.output;   // feed this output to the next layer
    }
}
/* Set the softmax temperature on every layer. */
void set_temp_network(network *net, float t)
{
    int idx;
    for(idx = 0; idx < net->n; ++idx) net->layers[idx].temperature = t;
}
/* Set the batch size on the network and on every layer. */
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int idx;
    for(idx = 0; idx < net->n; ++idx) net->layers[idx].batch = b;
}
/* Run inference on `input` and return a pointer to the final layer's
 * output buffer.  The network's input/truth/train/delta state is saved
 * and restored around the call. */
float *network_predict(network *net, float *input)
{
    network saved = *net;
    net->input = input;
    net->truth = 0;
    net->train = 0;
    net->delta = 0;
    forward_network(net);
    float *result = net->output;
    *net = saved;
    return result;
}
/* Count YOLO predictions whose objectness exceeds thresh. */
int yolo_num_detections(layer l, float thresh)
{
    int count = 0;
    int cell, anchor;
    for (cell = 0; cell < l.w*l.h; ++cell){
        for(anchor = 0; anchor < l.n; ++anchor){
            int obj_index = entry_index(l, 0, anchor*l.w*l.h + cell, 4);
            if(l.output[obj_index] > thresh) ++count;
        }
    }
    return count;
}
/* Total number of boxes all detection-type layers will emit above
 * thresh (YOLO layers are thresholded; DETECTION/REGION layers always
 * contribute every predicted box). */
int num_detections(network *net, float thresh)
{
    int total = 0;
    int idx;
    for(idx = 0; idx < net->n; ++idx){
        layer l = net->layers[idx];
        if(l.type == YOLO) total += yolo_num_detections(l, thresh);
        if(l.type == DETECTION || l.type == REGION) total += l.w*l.h*l.n;
    }
    return total;
}
/* Allocate detection slots for everything the net currently predicts
 * above thresh; each slot gets a per-class probability array sized by
 * the final layer's class count.  The caller owns the result.  If num
 * is non-NULL it receives the slot count. */
detection *make_network_boxes(network *net, float thresh, int *num)
{
    layer last = net->layers[net->n - 1];
    int nboxes = num_detections(net, thresh);
    if(num) *num = nboxes;
    detection *dets = (detection *)calloc(nboxes, sizeof(detection));
    int i;
    for(i = 0; i < nboxes; ++i){
        dets[i].prob = (float *)calloc(last.classes, sizeof(float));
    }
    return dets;
}
/* Decode one YOLO box: x,y are cell-relative offsets normalized by the
 * lw x lh grid; w,h are log-scale multiples of anchor n normalized by
 * the w x h network input size. */
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
    float tx = x[index];
    float ty = x[index + stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];
    box b;
    b.x = (i + tx) / lw;
    b.y = (j + ty) / lh;
    b.w = exp(tw) * biases[2*n] / w;
    b.h = exp(th) * biases[2*n+1] / h;
    return b;
}
/* Undo letterbox scaling: map box coordinates from network-input space
 * back to the original w x h image; when !relative, also scale to
 * pixel units. */
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int new_w, new_h;
    // the image was resized to fit inside netw x neth keeping aspect ratio
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    int i;
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}
// Decode every YOLO-layer prediction with objectness above `thresh` into
// `dets`, then map the boxes back to original-image coordinates.
// Returns the number of detections written.
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
    int i,j,n;
    float *predictions = l.output;
    // if (l.batch == 2) avg_flipped_yolo(l);
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            // objectness is channel 4 within each anchor's (x,y,w,h,obj,...) block
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float objectness = predictions[obj_index];
            if(objectness <= thresh) continue;
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            // l.mask[n] selects which anchor bias this sub-layer uses
            dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
            dets[count].objectness = objectness;
            dets[count].classes = l.classes;
            for(j = 0; j < l.classes; ++j){
                // class scores follow the 4 box channels + 1 objectness channel
                int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
                float prob = objectness*predictions[class_index];
                // zero out sub-threshold class probabilities
                dets[count].prob[j] = (prob > thresh) ? prob : 0;
            }
            ++count;
        }
    }
    correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
    return count;
}
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    /* Decode one region-layer box at grid cell (i, j) into normalized
       coordinates; width/height are exp-scaled by anchor n's bias. */
    float tx = x[index + 0*stride];
    float ty = x[index + 1*stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];
    box b;
    b.x = (i + tx) / w;
    b.y = (j + ty) / h;
    b.w = exp(tw) * biases[2*n] / w;
    b.h = exp(th) * biases[2*n+1] / h;
    return b;
}
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    /* Same letterbox-undo transform as correct_yolo_boxes, kept separate
       for the region layer path. */
    int new_w = 0;
    int new_h = 0;
    if (((float)netw/w) < ((float)neth/h)) {
        new_w = netw;
        new_h = (h * netw)/w;
    } else {
        new_h = neth;
        new_w = (w * neth)/h;
    }
    int k;
    for (k = 0; k < n; ++k){
        box b = dets[k].bbox;
        /* undo padding, rescale to the embedded image size */
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            /* scale fractions up to original-image pixels */
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[k].bbox = b;
    }
}
// Decode all region-layer predictions into `dets` (one slot per grid cell
// per anchor, zero prob when below threshold), then undo letterboxing.
// When l.batch == 2 the second batch entry is a horizontally flipped pass:
// it is un-flipped in place (x-offsets negated) and averaged with the first.
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i,j,n,z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w/2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for(z = 0; z < l.classes + l.coords + 1; ++z){
                        // mirror each row of the flipped copy around its center
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if(z == 0){
                            // channel 0 is the x offset: negate when mirroring
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        // average the normal and un-flipped passes
        for(i = 0; i < l.outputs; ++i){
            l.output[i] = (l.output[i] + flip[i])/2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = n*l.w*l.h + i;
            for(j = 0; j < l.classes; ++j){
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            // NOTE(review): mask channel is hard-coded at offset 4 while
            // objectness uses l.coords — confirm this is intended when
            // l.coords != 4.
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if(dets[index].mask){
                for(j = 0; j < l.coords - 4; ++j){
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }
            // NOTE(review): this class_index is shadowed by the loop-local
            // one below and otherwise unused.
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if(dets[index].objectness){
                for(j = 0; j < l.classes; ++j){
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                    float prob = scale*predictions[class_index];
                    dets[index].prob[j] = (prob > thresh) ? prob : 0;
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
    /* Walk all layers, appending each output layer's detections to `dets`
       and advancing the write cursor past what was written. */
    int idx;
    for (idx = 0; idx < net->n; ++idx) {
        layer cur = net->layers[idx];
        if (cur.type == YOLO) {
            dets += get_yolo_detections(cur, w, h, net->w, net->h, thresh, map, relative, dets);
        } else if (cur.type == REGION) {
            /* region layers always fill their full grid */
            get_region_detections(cur, w, h, net->w, net->h, thresh, map, hier, relative, dets);
            dets += cur.w*cur.h*cur.n;
        }
    }
}
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
    /* Convenience wrapper: allocate the detection array, then populate it
       from every output layer. Caller frees with free_detections(). */
    detection *boxes = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, boxes);
    return boxes;
}
void free_detections(detection *dets, int n)
{
    /* Release the per-detection buffers, then the array itself.
       Safe on NULL input; free(NULL) is a no-op, so the previous
       `if(dets[i].mask)` guard was redundant and is removed. */
    if (!dets) return;
    int i;
    for (i = 0; i < n; ++i) {
        free(dets[i].prob);
        free(dets[i].mask);
    }
    free(dets);
}
// Accessors for the network's input spatial dimensions.
int network_width(network *net){return net->w;}
int network_height(network *net){return net->h;}
layer get_network_output_layer(network *net)
{
    /* Return the last layer that is not a COST layer (the real output). */
    int idx;
    for (idx = net->n - 1; idx >= 0; --idx) {
        if (net->layers[idx].type != COST) {
            break;
        }
    }
    return net->layers[idx];
}
void free_network(network *net)
{
    /* Free each layer, the layer array, the optional input/truth buffers,
       and finally the network struct itself. free(NULL) is a no-op, so
       the buffers need no guards. */
    int idx;
    for (idx = 0; idx < net->n; ++idx) {
        free_layer(net->layers[idx]);
    }
    free(net->layers);
    free(net->input);
    free(net->truth);
    free(net);
}
layer network_output_layer(network *net)
{
    /* Last non-COST layer. Duplicates get_network_output_layer(); kept
       because both names are part of the public API. */
    int pos;
    for (pos = net->n - 1; pos >= 0; --pos) {
        if (net->layers[pos].type != COST) {
            break;
        }
    }
    return net->layers[pos];
}
// Number of input values expected by the first layer.
int network_inputs(network *net)
{
    return net->layers[0].inputs;
}
// Number of output values produced by the final (non-COST) layer.
int network_outputs(network *net)
{
    return network_output_layer(net).outputs;
}
// Pointer to the final (non-COST) layer's output buffer.
float *network_output(network *net)
{
    return network_output_layer(net).output;
}
//////////////////network end
//////////////////////box begin
int nms_comparator(const void *pa, const void *pb)
{
detection a = *(detection *)pa;
detection b = *(detection *)pb;
float diff = 0;
if(b.sort_class >= 0){
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
} else {
diff = a.objectness - b.objectness;
}
if(diff < 0) return 1;
else if(diff > 0) return -1;
return 0;
}
float overlap(float x1, float w1, float x2, float w2)
{
    /* 1-D overlap length of two segments given by center and width.
       Negative when the segments are disjoint (the gap between them). */
    float lo1 = x1 - w1/2;
    float hi1 = x1 + w1/2;
    float lo2 = x2 - w2/2;
    float hi2 = x2 + w2/2;
    float lo = (lo1 > lo2) ? lo1 : lo2;
    float hi = (hi1 < hi2) ? hi1 : hi2;
    return hi - lo;
}
float box_intersection(box a, box b)
{
    /* Area of the intersection rectangle; 0 when the boxes are disjoint
       along either axis. */
    float ow = overlap(a.x, a.w, b.x, b.w);
    float oh = overlap(a.y, a.h, b.y, b.h);
    if (ow < 0 || oh < 0) return 0;
    return ow * oh;
}
float box_union(box a, box b)
{
    /* Union area = sum of the two areas minus the shared part. */
    float inter = box_intersection(a, b);
    return a.w*a.h + b.w*b.h - inter;
}
float box_iou(box a, box b)
{
    /* Intersection over union in [0,1].
       NOTE(review): divides by zero if both boxes have zero area —
       presumably callers never pass degenerate boxes; confirm. */
    float i = box_intersection(a, b);
    float u = box_union(a, b);
    return i/u;
}
// Per-class non-maximum suppression.
// Pass 1 compacts the array: detections with zero objectness are swapped to
// the tail and `total` is shrunk to exclude them.
// Pass 2, per class: sort by that class's probability (descending, via
// nms_comparator keyed on sort_class) and zero the probability of any box
// whose IoU with an earlier (higher-scoring) box exceeds `thresh`.
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
    int i, j, k;
    k = total-1;
    for(i = 0; i <= k; ++i){
        if(dets[i].objectness == 0){
            // move the dead detection to the tail, re-examine slot i
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;
        }
    }
    total = k+1;
    for(k = 0; k < classes; ++k){
        for(i = 0; i < total; ++i){
            // tells nms_comparator which class probability to sort on
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator);
        for(i = 0; i < total; ++i){
            if(dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            for(j = i+1; j < total; ++j){
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh){
                    // suppressed by a stronger overlapping box
                    dets[j].prob[k] = 0;
                }
            }
        }
    }
}
//////////////////////box end
//////////////////////image begin
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
float get_color(int c, int x, int max)
{
float ratio = ((float)x/max)*5;
int i = floor(ratio);
int j = ceil(ratio);
ratio -= i;
float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
//printf("%f\n", r);
return r;
}
static float get_pixel_extend(image m, int x, int y, int c)
{
    /* Like get_pixel, but any out-of-range coordinate or channel yields 0
       instead of reading out of bounds. */
    if (x < 0 || y < 0 || x >= m.w || y >= m.h) return 0;
    if (c < 0 || c >= m.c) return 0;
    return get_pixel(m, x, y, c);
}
void composite_image(image source, image dest, int dx, int dy)
{
    /* Multiply `source` into `dest` at offset (dx, dy), channel by
       channel. Destination reads outside bounds are treated as 0. */
    int ch, row, col;
    for (ch = 0; ch < source.c; ++ch) {
        for (row = 0; row < source.h; ++row) {
            for (col = 0; col < source.w; ++col) {
                float s = get_pixel(source, col, row, ch);
                float d = get_pixel_extend(dest, dx + col, dy + row, ch);
                set_pixel(dest, dx + col, dy + row, ch, s * d);
            }
        }
    }
}
image border_image(image a, int border)
{
    /* Return a copy of `a` surrounded by a frame of the given width;
       frame pixels are set to 1. */
    image framed = make_image(a.w + 2*border, a.h + 2*border, a.c);
    int ch, row, col;
    for (ch = 0; ch < framed.c; ++ch) {
        for (row = 0; row < framed.h; ++row) {
            for (col = 0; col < framed.w; ++col) {
                int sx = col - border;
                int sy = row - border;
                float val;
                if (sx < 0 || sx >= a.w || sy < 0 || sy >= a.h) {
                    val = 1;    /* outside the source image: frame color */
                } else {
                    val = get_pixel_extend(a, sx, sy, ch);
                }
                set_pixel(framed, col, row, ch, val);
            }
        }
    }
    return framed;
}
image copy_image(image p)
{
    /* Deep copy: identical header, freshly allocated pixel buffer. */
    image out = p;
    int count = p.h*p.w*p.c;
    out.data = (float *)calloc(count, sizeof(float));
    memcpy(out.data, p.data, count*sizeof(float));
    return out;
}
image tile_images(image a, image b, int dx)
{
    /* Place b to the right of a with a gap of dx pixels on a white (1.0)
       canvas. An empty `a` (w == 0) just yields a copy of b. */
    if (a.w == 0) return copy_image(b);
    int out_h = (a.h > b.h) ? a.h : b.h;
    int out_c = (a.c > b.c) ? a.c : b.c;
    image canvas = make_image(a.w + b.w + dx, out_h, out_c);
    fill_cpu(canvas.w*canvas.h*canvas.c, 1, canvas.data, 1);
    embed_image(a, canvas, 0, 0);
    composite_image(b, canvas, a.w + dx, 0);
    return canvas;
}
// Render `string` as an image by tiling glyphs from the `characters` font
// atlas (indexed [font size][character code]), then framing it with a
// border proportional to the text height.
// NOTE(review): (int)*string is negative for bytes >= 0x80 when char is
// signed, which would index the atlas out of range — presumably inputs are
// ASCII only; confirm.
image get_label(image **characters, char *string, int size)
{
    size = size/10;
    if(size > 7) size = 7;  // atlas provides font sizes 0..7
    image label = make_empty_image(0,0,0);
    while(*string){
        image l = characters[size][(int)*string];
        // negative dx overlaps adjacent glyphs for tighter kerning
        image n = tile_images(label, l, -size - 1 + (size+1)/2);
        free_image(label);
        label = n;
        ++string;
    }
    image b = border_image(label, label.h*.25);
    free_image(label);
    return b;
}
void draw_label(image a, int r, int c, image label, const float *rgb)
{
    /* Blit `label` onto `a` anchored at (row r, col c), tinting each
       channel by rgb. Drawn above the anchor when there is room, and
       clipped to the bounds of `a`. */
    if (r - label.h >= 0) r = r - label.h;
    int row, col, ch;
    for (row = 0; row < label.h && row + r < a.h; ++row) {
        for (col = 0; col < label.w && col + c < a.w; ++col) {
            for (ch = 0; ch < label.c; ++ch) {
                float v = get_pixel(label, col, row, ch);
                set_pixel(a, col + c, row + r, ch, rgb[ch] * v);
            }
        }
    }
}
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    /* Draw a 1-pixel axis-aligned rectangle outline in RGB.
       Corners are clamped into the image first; pixel data is laid out
       as channel planes of a.w*a.h floats each. */
    if (x1 < 0) x1 = 0;
    if (x1 >= a.w) x1 = a.w-1;
    if (x2 < 0) x2 = 0;
    if (x2 >= a.w) x2 = a.w-1;
    if (y1 < 0) y1 = 0;
    if (y1 >= a.h) y1 = a.h-1;
    if (y2 < 0) y2 = 0;
    if (y2 >= a.h) y2 = a.h-1;
    int plane = a.w*a.h;
    int i;
    /* top and bottom edges */
    for (i = x1; i <= x2; ++i) {
        a.data[i + y1*a.w + 0*plane] = r;
        a.data[i + y2*a.w + 0*plane] = r;
        a.data[i + y1*a.w + 1*plane] = g;
        a.data[i + y2*a.w + 1*plane] = g;
        a.data[i + y1*a.w + 2*plane] = b;
        a.data[i + y2*a.w + 2*plane] = b;
    }
    /* left and right edges */
    for (i = y1; i <= y2; ++i) {
        a.data[x1 + i*a.w + 0*plane] = r;
        a.data[x2 + i*a.w + 0*plane] = r;
        a.data[x1 + i*a.w + 1*plane] = g;
        a.data[x2 + i*a.w + 1*plane] = g;
        a.data[x1 + i*a.w + 2*plane] = b;
        a.data[x2 + i*a.w + 2*plane] = b;
    }
}
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    /* Thick rectangle: w nested 1-pixel outlines shrinking inwards. */
    int off;
    for (off = 0; off < w; ++off) {
        draw_box(a, x1 + off, y1 + off, x2 - off, y2 - off, r, g, b);
    }
}
image float_to_image(int w, int h, int c, float *data)
{
    /* Wrap an existing float buffer in an image header. No copy is made:
       the caller retains ownership of `data`. */
    image im = make_empty_image(w, h, c);
    im.data = data;
    return im;
}
image threshold_image(image im, float thresh)
{
    /* Binarize: 1 where the pixel exceeds thresh, 0 elsewhere. */
    image out = make_image(im.w, im.h, im.c);
    int n = im.w*im.h*im.c;
    int i;
    for (i = 0; i < n; ++i) {
        out.data[i] = (im.data[i] > thresh) ? 1 : 0;
    }
    return out;
}
// Draw every detection above `thresh` onto `im`: a colored box, an optional
// text label (when an `alphabet` glyph atlas is supplied), and an optional
// instance mask. The box color is derived deterministically from the first
// matching class index.
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
    int i,j;
    for(i = 0; i < num; ++i){
        char labelstr[4096] = {0};
        int class_t = -1;
        // build a comma-separated label of all classes above threshold;
        // class_t remembers the first one (used for the box color)
        for(j = 0; j < classes; ++j){
            //printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            if (dets[i].prob[j] > thresh){
                if (class_t < 0) {
                    strcat(labelstr, names[j]);
                    class_t = j;
                } else {
                    strcat(labelstr, ", ");
                    strcat(labelstr, names[j]);
                }
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
        if(class_t >= 0){
            int width = im.h * .006;
            //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
            // pseudo-random but stable color per class
            int offset = class_t*123457 % classes;
            float red = get_color(2,offset,classes);
            float green = get_color(1,offset,classes);
            float blue = get_color(0,offset,classes);
            float rgb[3];
            //width = prob*20+2;
            rgb[0] = red;
            rgb[1] = green;
            rgb[2] = blue;
            box b = dets[i].bbox;
            //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
            // convert normalized center/size box to clamped pixel corners
            int left = (b.x-b.w/2.)*im.w;
            int right = (b.x+b.w/2.)*im.w;
            int top = (b.y-b.h/2.)*im.h;
            int bot = (b.y+b.h/2.)*im.h;
            if(left < 0) left = 0;
            if(right > im.w-1) right = im.w-1;
            if(top < 0) top = 0;
            if(bot > im.h-1) bot = im.h-1;
            draw_box_width(im, left, top, right, bot, width, red, green, blue);
            if (alphabet) {
                image label = get_label(alphabet, labelstr, (im.h*.03));
                draw_label(im, top + width, left, label, rgb);
                free_image(label);
            }
            if (dets[i].mask){
                // masks are stored as 14x14 and resized to the box extent
                image mask = float_to_image(14, 14, 1, dets[i].mask);
                image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
                image tmask = threshold_image(resized_mask, .5);
                embed_image(tmask, im, left, top);
                free_image(mask);
                free_image(resized_mask);
                free_image(tmask);
            }
        }
    }
}
// Serialize detections above `thresh` into the flat int buffer `sendData`:
// sendData[0] is the running detection count, followed by 12 ints per
// detection — 32 bytes (8 ints) of label text, then w, h, x, y scaled to a
// 640x640 frame (y shifted up by 80).
// NOTE(review): strncpy does not NUL-terminate when the label is >= 32
// chars — confirm the receiver tolerates an unterminated name. The label
// keeps only the LAST class above threshold (strcat without separator
// concatenates multiple names).
void extract_detections(int * sendData, image im, detection *dets, int num, float thresh, char **names, int classes)
{
    int i,j;
    int * sendDataIter = sendData+1;
    for(i = 0; i < num; ++i){
        char labelstr[4096] = {0};
        int class_t = -1;
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                strcat(labelstr, names[j]);
                class_t = j;
            }
        }
        if(class_t >= 0){
            box b = dets[i].bbox;
            // first 8 ints (32 bytes) carry the label text
            strncpy(((char *) sendDataIter), labelstr, 32);
            sendDataIter[8] = b.w * 640;
            sendDataIter[9] = b.h * 640;
            sendDataIter[10] = b.x * 640;
            sendDataIter[11] = b.y * 640 - 80;
            sendDataIter += 12;
            //Update number classfied
            sendData[0]++;
        }
    }
}
//////////////////////image end
//////////////////////////HLS begin
//#define MIN(x,y) ((x)<(y)?(x):(y))
//#define S 2
//#define K 3
//
//#define Tn 1
//#define Tm 16
//#define Tr 13
//#define Tc 13
//#define OnChipIB_Width ((Tc-1)*S+K)
//#define OnChipIB_Height ((Tr-1)*S+K)
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
#define S 2
#define K 3
#define Tn 4
#define Tm 32
#define Tr 26
#define Tc 26
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define ALPHA_BETA_MAX_NUM 1024
#define INTERWIDTH 20
void copy_mem2dev(uint8_t *orig,uint32_t byte_num, unsigned long in_buffer)
{
    /* Copy byte_num bytes from `orig` into physical memory at `in_buffer`
       by mmap-ing /dev/mem. The mapping length is rounded up to whole
       4 KiB pages as mmap requires.
       Fixes: check open(); close fd on the mmap-failure path (was leaked);
       munmap the full mapped length rather than byte_num (a short length
       left the tail pages mapped). */
    int fd = open("/dev/mem", O_RDWR);
    if(fd < 0)
    {
        perror("open /dev/mem failed!\n");
        return;
    }
    unsigned char *virtual_addr;
    uint32_t RequestByteNum;// must be a whole number of pages
    if(byte_num%(4*1024)==0)
        RequestByteNum = byte_num;
    else
    {
        RequestByteNum = (byte_num/(4*1024)+1)*(4*1024);
    }
    virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer);
    if(virtual_addr == MAP_FAILED)
    {
        perror("Virtual_addr_in mapping for absolute memory access failed!\n");
        close(fd);
        return;
    }
    memcpy(virtual_addr,orig,byte_num);
    munmap((void *)virtual_addr, RequestByteNum);
    close(fd);
}
void copy_dev2mem(uint8_t *dst,uint32_t byte_num, unsigned long in_buffer)
{
    /* Copy byte_num bytes of physical memory at `in_buffer` into `dst`
       via an mmap of /dev/mem (page-rounded mapping).
       Fixes: check open(); close fd on the mmap-failure path (was leaked);
       munmap the full mapped length rather than byte_num. */
    int fd = open("/dev/mem", O_RDWR);
    if(fd < 0)
    {
        perror("open /dev/mem failed!\n");
        return;
    }
    unsigned char *virtual_addr;
    uint32_t RequestByteNum;// must be a whole number of pages
    if(byte_num%(4*1024)==0)
        RequestByteNum = byte_num;
    else
    {
        RequestByteNum = (byte_num/(4*1024)+1)*(4*1024);
    }
    virtual_addr = (unsigned char *)mmap(NULL, RequestByteNum, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)in_buffer);
    if(virtual_addr == MAP_FAILED)
    {
        perror("Virtual_addr_in mapping for absolute memory access failed!\n");
        close(fd);
        return;
    }
    memcpy((uint8_t *)dst,virtual_addr,byte_num);
    munmap((void *)virtual_addr, RequestByteNum);
    close(fd);
}
int copy_file2mem(char *bin_file,uint32_t byte_num,unsigned long in_buffer)
{
    /* Stream a binary file into device memory at `in_buffer`, 1 MiB at a
       time. Returns 0 on success, -1 on error.
       Fixes: bail out (instead of calling fread on NULL) when fopen fails,
       free the staging buffer on that path, and print the unsigned long
       offset with %lu instead of %d.
       Note: byte_num is unused — the whole file is copied; kept for
       interface compatibility. */
    unsigned char *buffer = (unsigned char *)malloc(1024*1024);
    if(buffer==NULL){
        printf("cannot malloc buffer 1024*1024 byte\n");
        return -1;
    }
    FILE *fp = fopen(bin_file, "rb");
    if(fp == NULL){
        fprintf(stderr,"CANNOT OPEN bin_file\n");
        free(buffer);
        return -1;
    }
    int rd_num;
    unsigned long offset = 0;
    while((rd_num = fread(buffer, sizeof(unsigned char), 1024*1024, fp)) > 0)
    {
        copy_mem2dev(buffer,rd_num, in_buffer+offset);
        offset += rd_num;
    }
    printf("copy_file2mem offset=%lu\n",offset);
    fclose(fp);
    free(buffer);
    return 0;
}
int copy_mem2file(char *bin_file,uint32_t byte_num,unsigned long in_buffer)
{
    /* Dump byte_num bytes of device memory at `in_buffer` into a file,
       1 MiB at a time. Returns 0 on success, -1 on error.
       Fixes: bail out (instead of writing to a NULL FILE*) when fopen
       fails, free the staging buffer on that path, keep the remaining
       count unsigned, and print the offset with %lu instead of %d. */
    void *buffer = malloc(1024*1024);
    if(buffer==NULL){
        printf("cannot malloc buffer 1024*1024 byte\n");
        return -1;
    }
    FILE *fp = fopen(bin_file, "wb");
    if(fp == NULL){
        fprintf(stderr,"CANNOT OPEN bin_file\n");
        free(buffer);
        return -1;
    }
    uint32_t remaining = byte_num;
    unsigned long offset = 0;
    while(remaining > 0)
    {
        uint32_t chunk = (remaining < 1024*1024) ? remaining : (1024*1024);
        copy_dev2mem((uint8_t *)buffer,chunk, in_buffer+offset);
        fwrite(buffer , sizeof(unsigned char), chunk, fp);
        remaining -= chunk;
        offset += chunk;
    }
    printf("copy_mem2file offset=%lu\n",offset);
    fclose(fp);
    free(buffer);
    return 0;
}
//double what_time_is_it_now()
//{
// struct timeval time;
// if (gettimeofday(&time,NULL)){
// return 0;
// }
// return (double)time.tv_sec + (double)time.tv_usec * .000001;
//}
int YOLO2_FPGA(int In_Address,int Out_Address,int Weight_offset,int Beta_offset,const int InFM_num,const int OutFM_num,
const int Kernel_size,const int Kernel_stride,
const int Input_w,const int Input_h,const int Output_w,const int Output_h,
const int Padding,const bool IsNL,const bool IsBN,
const int TM,const int TN,const int TR,const int TC,
const int mLoops,const int nLoops,const int rLoops,const int cLoops,const int LayerType,
int InputQ,int OutputQ,int WeightQ,int BetaQ,unsigned int WEIGHT_BASE,unsigned int BETA_BASE)
{
int T2Rate;
switch(Input_w)
{
case 26:
T2Rate = 2;
break;
case 13:
T2Rate = 4;
break;
default:
T2Rate = 1;
break;
}
const unsigned char TRow = (TR-1)*Kernel_stride+Kernel_size;
int trow_loops = (int)ceil(((float)TRow/T2Rate));
unsigned int ap_idle;
unsigned int ap_done;
unsigned long int PhysicalAddress = YOLO2_BASEADDR;
int map_len = 0x180;
int fd = open("/dev/mem", O_RDWR);
unsigned char *xbase_address;
xbase_address = (unsigned char *)mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)PhysicalAddress);
if(xbase_address == MAP_FAILED)
{
perror("1:Init Mapping memory for absolute memory access failed.\n");
return -1;
}
while(1)
{
ap_idle = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 2) && 0x1);
if(ap_idle)
break;
}
//#define WEIGHT_BASE (0x10000000)
//#define BETA_BASE (0x1C25F000)
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_R_DATA, In_Address);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT1_DATA, In_Address);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT2_DATA, In_Address);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT3_DATA, In_Address);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_R_DATA, Out_Address);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT1_DATA, Out_Address);
// WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT2_DATA, Out_Address);
// WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT3_DATA, Out_Address);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHT_DATA, WEIGHT_BASE + Weight_offset*4);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETA_DATA, BETA_BASE + Beta_offset*4);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INFM_NUM_DATA, InFM_num);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTFM_NUM_DATA, OutFM_num);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_SIZE_DATA, Kernel_size);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_KERNEL_STRIDE_DATA, Kernel_stride);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_W_DATA, Input_w);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUT_H_DATA, Input_h);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_W_DATA, Output_w);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUT_H_DATA, Output_h);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_PADDING_DATA, Padding);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISNL_DATA, IsNL);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_ISBN_DATA, IsBN);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TM_DATA, TM);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TN_DATA, TN);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TR_DATA, TR);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TC_DATA, TC);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_MLOOPS_DATA, mLoops);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_NLOOPS_DATA, nLoops);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_RLOOPS_DATA, rLoops);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_CLOOPS_DATA, cLoops);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_LAYERTYPE_DATA, LayerType);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_INPUTQ_DATA, InputQ);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_OUTPUTQ_DATA, OutputQ);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_WEIGHTQ_DATA, WeightQ);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_BETAQ_DATA, BetaQ);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_TROW_LOOPS_DATA, trow_loops);
// double time1,time2;
// time1 = what_time_is_it_now();
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_GIE, 0x0);
WriteReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL, 0x1);//Start
while(1)
{
ap_done = ((ReadReg(xbase_address, XYOLO2_FPGA_CTRL_BUS_ADDR_AP_CTRL) >> 1) && 0x1);
if(ap_done)
break;
}
// time2 = what_time_is_it_now();
// printf("START TO DONE in %f seconds.\n",time2 - time1);
munmap((void *)xbase_address, map_len);
close(fd);
return 0;
}
////////////////////////////////////////////////////////PL v3 end
// Run the whole YOLOv2 network on the FPGA accelerator.
// Flow: (firstRun only) load fixed-point weights/biases into device memory;
// pack the float input into 16-bit fixed point (Q14) and DMA it in; compute
// a ping-pong in/out address schedule over the device scratch region
// [MEM_BASE, MEM_BASE+MEM_LEN); then dispatch each layer (CONV / MAXPOOL /
// REORG) to YOLO2_FPGA, finishing with the REGION layer decoded on the CPU.
// Per-layer quantization exponents are read from the *_maxQ_*.bin files.
// *net is restored before returning.
void yolov2_hls_ps(int firstRun, network *net, float *input,unsigned int WEIGHT_BASE,unsigned int BETA_BASE,unsigned int MEM_BASE)
{
    int x;
    network orig = *net;
    net->input = input;
    // per-conv-layer sizes (in 16-bit words) of the packed weight/bias blobs
    int weight_offset[32] = {864, 18432, 73728, 8192, 73728,
                             294912, 32768, 294912, 1179648, 131072, 1179648, 131072,
                             1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184,
                             9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024,
                           512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    int offset_index = 0;
    double time1,time2;
    if (firstRun) {
        // One-time upload of the quantized weights and biases.
        time1 = what_time_is_it_now();
        copy_file2mem("weightsv2_comb_reorg_ap16.bin",(203767168)/2,WEIGHT_BASE);//->C253D80
        printf("yolov2_w copy ok\n");
        copy_file2mem("biasv2_comb_ap16.bin",(43044+4)/2,BETA_BASE);//->C268724 203812864 = C25F000
        printf("yolov2_b copy ok\n");
        time2 = what_time_is_it_now();
        printf("Predicted in %f seconds.\n",time2 - time1);
    }
    time1 = what_time_is_it_now();
    // CPU-side buffer for the region layer's dequantized input
    float *region_buf = (float *)calloc(13*13*432,sizeof(float));
    if(!region_buf) printf("region_buf calloc fail\n");
#define MEM_LEN (416*416*32*2+208*208*32*2)
    // Device scratch memory: layers ping-pong between top and bottom.
    unsigned int Memory_top = MEM_BASE;
    unsigned int Memory_bottom = MEM_BASE + MEM_LEN;
    int in_ptr[32];
    int out_ptr[32];
    /////////////////////
#define QNUM 23
    // Quantization exponents: 24 activation Qs, 23 weight Qs, 23 bias Qs.
    int inputQ[QNUM+1];
    int weightQ[QNUM];
    int betaQ[QNUM];
    FILE *Qin;
    Qin = fopen("yolov2_ap16_inout_maxQ_24.bin","rb");
    if(!Qin) file_error("Qin error 1\n");
    fread(inputQ,sizeof(int),QNUM+1,Qin);
    fclose(Qin);
    // the route/concat point needs both branches on the same Q scale
    if(inputQ[20] < inputQ[21])
        inputQ[21] = inputQ[20];
    else
        inputQ[20] = inputQ[21];
    Qin = fopen("weightsv2_comb_reorg_ap16_maxQ_23.bin","rb");
    if(!Qin) file_error("Qin error 2\n");
    fread(weightQ,sizeof(int),QNUM,Qin);
    fclose(Qin);
    Qin = fopen("biasv2_comb_ap16_maxQ_23.bin","rb");
    if(!Qin) file_error("Qin error 4\n");
    fread(betaQ,sizeof(int),QNUM,Qin);
    fclose(Qin);
    // scale factor to turn the last layer's fixed-point output into floats
    const double LastLayerOutputPara = pow(2.0,-inputQ[23]);
    /////////////////////
#define ROUTE16_LEN (26*26*512*4/2)
#define CONV27_LEN (13*13*256*4/2)
#define CONV24_LEN (13*13*1024*4/2)
    int *input_tmp_mem = (int *)calloc(416*416*32/2,sizeof(int));
    if(!input_tmp_mem) file_error("input_tmp_mem error \n");
    int *region_input_buffer = (int *)calloc(13*13*432*4/2,sizeof(int));
    if(!region_input_buffer) file_error("region_input_buffer error \n");
    int tmp_in;
    short current_in,next_in;
    bool NextPixelInFlag = true;
    int InputPixelOffset = 0;
    time2 = what_time_is_it_now();
    //printf("PREAMBLE in %f seconds.\n",time2 - time1);
    // Quantize the input to Q14 shorts, packing two per 32-bit word.
    for(x=0;x<416*416*3;x++)//1st Layer input Q14
    {
        if(NextPixelInFlag)
        {
            current_in = (short)(input[x]*pow(2.0,14));
            NextPixelInFlag = false;
        }
        else
        {
            next_in = (short)(input[x]*pow(2.0,14));
            tmp_in = (next_in<<16) + (current_in);
            input_tmp_mem[InputPixelOffset] = tmp_in;
            InputPixelOffset++;
            NextPixelInFlag = true;
        }
    }
    copy_mem2dev((uint8_t *)input_tmp_mem,416*416*3*4/2, MEM_BASE);
    free(input_tmp_mem);
    // Layers 0..17: simple ping-pong between Memory_top and Memory_bottom.
    for(x=0;x<18;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - net->layers[x].outputs*4/2 ;
        }
        else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    // Layers 18..24: keep ROUTE16_LEN reserved at the bottom for the
    // layer-16 activations needed later by the route/concat.
    for(x=18;x<25;x++)
    {
        if(x%2==0)
        {
            in_ptr[x] = Memory_top;
            out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs*4/2;
        }else
        {
            in_ptr[x] = out_ptr[x-1];
            out_ptr[x] = Memory_top;
        }
    }
    // Hand-laid addresses for the route/reorg/concat tail of the network.
    in_ptr[26] = Memory_bottom - ROUTE16_LEN;
    out_ptr[26] = Memory_top;
    in_ptr[27] = Memory_top;
    out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;
    in_ptr[29] = out_ptr[27];
    out_ptr[29] = Memory_top;
    in_ptr[30] = Memory_top;
    out_ptr[30] = Memory_bottom - (net->layers[30].outputs + 13*13*3)*4/2;
    // DMA destinations must be page-aligned.
    if(out_ptr[30]%(4*1024)!=0)
    {
        out_ptr[30] = (out_ptr[30]/(4*1024)-1)*(4*1024);
    }
    in_ptr[31] = out_ptr[30];
    network netp = *net;
    int i;
    int j;
    int woffset = 0;
    int boffset = 0;
    int TR,TC,TM,TN;
    int output_w,output_h;
    int rLoops,cLoops,mLoops,nLoops;
    double time_sum = 0.0;
    int INPUTQ;
    for(i = 0; i < netp.n; ++i)
    {
        netp.index = i;
        layer l = netp.layers[i];
        //printf("Layer[%2d]: ",i);
        switch(l.type)
        {
            case CONVOLUTIONAL:
                //printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
                output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
                output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
                // tile sizes bounded by the on-chip buffers and layer dims
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = MIN(l.n,Tm);
                TN = MIN(l.c,Tn);
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.n)/TM);
                nLoops = (int)ceil(((float)l.c)/TN);
                INPUTQ = inputQ[offset_index];
                // layer 26 routes from layer 16, so it uses that layer's Q
                if(i==26)
                    INPUTQ = inputQ[12];
                time1 = what_time_is_it_now();
                YOLO2_FPGA(in_ptr[i],out_ptr[i],woffset/2,boffset/2,
                           l.c,l.n,l.size,
                           l.stride,l.w,l.h,output_w,output_h,
                           l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0,
                           TM,TN,TR,TC,
                           mLoops,nLoops,rLoops,cLoops,0,
                           INPUTQ,inputQ[offset_index+1],weightQ[offset_index],betaQ[offset_index],
                           WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                //printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                woffset += weight_offset[offset_index];
                boffset += beta_offset[offset_index];
                offset_index++;
                break;
            case MAXPOOL:
                //printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                // NOTE(review): w/h look swapped here (output_w = l.out_h);
                // presumably harmless because pooled maps are square.
                output_w = l.out_h;
                output_h = l.out_w;
                TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
                TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
                TR = MIN(output_h,TR);
                TC = MIN(output_w,TC);
                TM = MIN(Tm,Tn);
                TM = MIN(l.c,TM);
                TN = TM;
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = (int)ceil(((float)l.c)/TM);
                time1 = what_time_is_it_now();
                // NOTE(review): NULL passed for the int offset parameters;
                // LayerType 1 = maxpool (no weights/biases used).
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c,
                           l.size,l.stride,l.w,l.h,output_w,output_h,
                           0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,1,
                           inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH,
                           WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                //printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
            case REORG:
                //printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
                // reorg is driven with a flattened 52 x (32*26) view;
                // LayerType 2 on the accelerator.
                output_w = 26;
                output_h = 32*13;
                TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
                TR = MIN(output_h,TR);
                TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
                TC = MIN(output_w,TC);
                TM = 4;
                TN = TM;
                rLoops = (int)ceil(((float)output_h)/TR);
                cLoops = (int)ceil(((float)output_w)/TC);
                mLoops = 1;
                time1 = what_time_is_it_now();
                YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4,
                           l.stride,l.stride,52,32*26,output_w,output_h,
                           0,0,0,TM,TN,TR,TC,mLoops,1,rLoops,cLoops,2,
                           inputQ[offset_index],inputQ[offset_index],INTERWIDTH,INTERWIDTH,
                           WEIGHT_BASE,BETA_BASE);
                time2 = what_time_is_it_now();
                //printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
            case ROUTE:
                // routing is realized purely through the address schedule
                //printf("outputMemory:%8d;route ",l.outputs);
                for(j = 0; j < l.n; ++j){
                    printf(" %d", l.input_layers[j]);
                }
                //printf("\n");
                break;
            case REGION:
                // first=time(NULL);
                time1 = what_time_is_it_now();
                //printf("outputMemory:%8d;Detection\n",l.outputs);
                // Pull the final fixed-point activations back from the
                // device, unpack two shorts per word, dequantize to float,
                // and run the region layer on the CPU.
                copy_dev2mem((uint8_t *)region_input_buffer,13*13*432*4/2, in_ptr[i]);
                bool NextPixelFlag = true;
                int OutputPixelOffset = 0;
                short current_p,next_p,output_p;
                int *Output_ptr = (int *)(region_input_buffer);
                for(j=0;j<l.outputs;j++)
                {
                    if(NextPixelFlag)
                    {
                        int tmp_p = Output_ptr[OutputPixelOffset];
                        OutputPixelOffset++;
                        current_p = tmp_p;
                        next_p = tmp_p >> 16;
                        output_p = current_p;
                        NextPixelFlag = false;
                    }else
                    {
                        output_p = next_p;
                        NextPixelFlag = true;
                    }
                    region_buf[j] = output_p*LastLayerOutputPara;
                }
                netp.input = region_buf;
                //netp.input = in_ptr[i];
                forward_region_layer(l,netp);
                time2 = what_time_is_it_now();
                //printf("Predicted in %f seconds.\n",time2 - time1);
                time_sum += (time2 - time1);
                break;
        }
        netp.input = l.output;
    }
    //printf("TIME_SUM Predicted in %f seconds.\n",time_sum);
    *net = orig;
    free(region_input_buffer);
    free(region_buf);
    // free(Memory_buf);
    // free(Weight_buf);
    // free(Alpha_buf);
    // free(Beta_buf);
}
//////////////////////////HLS end
#endif
|
GB_unaryop__minv_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint16
// op(A') function: GB_tran__minv_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv (cast to uint8_t) Ax [p] for all anz entries, parallelized
// over nthreads. The cast and operator come from the GB_CAST_OP macro
// (uint8_t cij = (uint8_t) aij ; cij = GB_IMINV_UNSIGNED (cij, 8)).
// NOTE: this file is auto-generated; code is left untouched by design.
GrB_Info GB_unop__minv_uint8_uint16
(
    uint8_t *restrict Cx,        // output array, anz entries
    const uint16_t *restrict Ax, // input array, anz entries
    int64_t anz,                 // number of entries to process
    int nthreads                 // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose A, typecast uint16_t -> uint8_t, and
// apply the unary operator; the actual loop body lives in the shared
// template GB_unaryop_transpose.c (phase 2 of the two-phase transpose).
// NOTE: this file is auto-generated; code is left untouched by design.
GrB_Info GB_tran__minv_uint8_uint16
(
    GrB_Matrix C,                      // output matrix
    const GrB_Matrix A,                // input matrix (transposed logically)
    int64_t **Rowcounts,               // per-slice row counts from phase 1
    GBI_single_iterator Iter,          // iterator over A's vectors
    const int64_t *restrict A_slice,   // how A is split across slices
    int naslice                        // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
TriMesh.h | #ifndef TRIMESH_H
#define TRIMESH_H
/*
Szymon Rusinkiewicz
Princeton University
TriMesh.h
Class for triangle meshes.
*/
#define LARGENUM 10000000.0
#define ONE 1
#define CURVATURE 2
#define NOISE 3
#define EPS 1e-6
//#define SPEEDTYPE ONE
#include "Vec.h"
#include "Color.h"
#include "KDtree.h"
#include "math.h"
#include <vector>
#include <list>
#include <map>
#include <limits>
#include <iostream>
#include <fstream>
// SHIREEN
#include <iterator> // -- PM
#include <vnl/vnl_math.h>
#include <vnl/vnl_sparse_matrix.h>
#include <vnl/algo/vnl_svd.h>
#include <vnl/algo/vnl_sparse_lu.h>
#include <vcl_legacy_aliases.h>
//#define SHOW_WARNING 1
#define NUM_THREADS 8
#ifdef MP_USE_OPENMP
#include <omp.h> // -- PM
#endif
// end SHIREEN
// Praful
#include <vgl/algo/vgl_homg_operators_2d.h>
#include <vgl/vgl_conic.h>
#include <vnl/vnl_matrix.h>
#include <vnl/vnl_vector.h>
#include <vnl/algo/vnl_matrix_inverse.h>
#include <string>
#include <fstream>
#include <cstdlib>
#include <vcl_compiler.h>
// Praful end
// itk files to generate Face Index -- PM
#include "itkImage.h"
#include "itkImageRegionConstIteratorWithIndex.h"
#include "itkImageRegionIteratorWithIndex.h"
#include "itkImageFileWriter.h"
#include "itkTimeProbe.h"
#include "itkResampleImageFilter.h"
#include "itkIdentityTransform.h"
#include "itkLinearInterpolateImageFunction.h"
#include "itkBSplineInterpolateImageFunction.h"
// sets for the face index set
#include <set>
// Danny Perry's functor
#include "map.h"
///* Prateep */
//// include alglib headers
//#include "alglib/ap.h"
//#include "alglibinternal.h"
//#include "alglibmisc.h"
//#include "solvers.h"
//#include "optimization.h"
//#include "interpolation.h"
typedef float PixelType;
//using std::set; // -- PM
// end SHIREEN
using std::vector;
using std::map;
// SHIREEN
#include <algorithm>
#define PI 3.141592653589793
#ifndef MIN
// Fully parenthesized so the macro composes safely inside larger expressions
// (previously `2*MIN(a,b)` parsed as `(2*cond)?a:b`). Arguments are still
// evaluated twice -- do not pass expressions with side effects.
#define MIN(a,b) (((a)<(b))?(a):(b))
#endif
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif
// NOTE(review): file-scope mutable counter in a header -- each including
// translation unit gets its own copy, and the functors below increment it
// without synchronization; confirm it is only used for progress logging.
static int ITER_BLOCK = 0;
/*Prateep */
// Region functor: for every voxel flagged 1 in the input mask, writes into
// `out_` the id of the mesh face nearest to the voxel's physical position,
// or -1 when the voxel's supervoxel has no candidate faces. Candidate faces
// come from a precomputed supervoxel -> face-list table rather than a KD-tree.
template<class TIn, class TOut, class Mesh>
struct MapFunctor
{
typedef typename TIn::ConstPointer TInP;
typedef typename TOut::Pointer TOutP;
typedef typename TOut::RegionType OutRegion;
typedef typename TIn::PixelType TPix;
// Store size, origin and spacing of super-voxel (Filled in getFaceIndexMap)
float supVoxelOrigin[3];
float supVoxelSpacing[3];
int supVoxelSize[3];
// super voxel face list
// maps a supervoxel linear index to the face ids intersecting it
map<int, vector<int> > superVoxelFaceList;
Mesh mesh;
TOutP out_;
MapFunctor(TOutP out) : out_(out) {}
// Process one threaded sub-region of the mask image.
void operator()(const TInP &in, const OutRegion & threadRegion)
{
// progress counter only; shared across threads without locking
ITER_BLOCK = ITER_BLOCK + 1;
std::cout << "Iteration : " << ITER_BLOCK << std::endl;
typedef itk::ImageRegionConstIteratorWithIndex<TIn> It;
It itI(in, threadRegion);
for(itI.GoToBegin(); !itI.IsAtEnd(); ++itI) {
if(itI.Get() == 1)
{
point tmPoint;
typename TIn::PointType itkPoint;
in->TransformIndexToPhysicalPoint(itI.GetIndex(), itkPoint);
for(int i = 0; i < 3; i++) { tmPoint[i] = itkPoint[i]; }
// Get nearest k vertices
vector<int> adjFaces; adjFaces.clear();
vector<int>::iterator adjFacesIt;
// find triangles enclosed inside each supervoxel
int tmpInd = mesh.physicalPointToLinearIndex(tmPoint, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
for(vector<int>::iterator it = superVoxelFaceList[tmpInd].begin(); it != superVoxelFaceList[tmpInd].end(); it++) {
adjFaces.push_back((*it));
}
// std::cout << "Number of neighbors : " << adjFaces.size() << std::endl;
if(adjFaces.empty() )
{
// We can either abort here or ignore the voxel
// no candidate faces for this supervoxel: mark the voxel invalid
out_->SetPixel(itI.GetIndex(), -1);
} else {
// brute-force nearest face among the supervoxel's candidates
double minDist = LARGENUM;
int fid = -1;
for(adjFacesIt = adjFaces.begin(); adjFacesIt != adjFaces.end(); adjFacesIt++) {
point projPoint;
double dist = mesh.pointTriangleDistance(tmPoint, mesh.faces[*(adjFacesIt)], projPoint);
// EPS tolerance biases ties toward the later face in the list
if(dist + EPS <= minDist) {
minDist = dist;
fid = *(adjFacesIt);
}
}
out_->SetPixel(itI.GetIndex(), fid);
adjFaces.clear();
}
} else {
// voxel outside the narrow band
out_->SetPixel(itI.GetIndex(), -1);
}
}
}
};
/* Prateep */
template<class TIn, class TOut, class Mesh>
struct MapFunctorKDtree
{
typedef typename TIn::ConstPointer TInP;
typedef typename TOut::Pointer TOutP;
typedef typename TOut::RegionType OutRegion;
typedef typename TIn::PixelType TPix;
Mesh mesh;
KDtree *kd;
TOutP out_;
MapFunctorKDtree(TOutP out) : out_(out) {}
void setKD() {
kd = new KDtree(mesh.vertices);
}
void operator()(const TInP &in, const OutRegion & threadRegion)
{
ITER_BLOCK = ITER_BLOCK + 1;
std::cout << "Iteratio : " << ITER_BLOCK << std::endl;
typedef itk::ImageRegionConstIteratorWithIndex<TIn> It;
It itI(in, threadRegion);
for(itI.GoToBegin(); !itI.IsAtEnd(); ++itI)
{
if(itI.Get() == 1)
{
point tmPoint;
itk::Image<PixelType, 3>::PointType itkPoint;
in->TransformIndexToPhysicalPoint(itI.GetIndex(), itkPoint);
for(int i = 0; i < 3; i++) { tmPoint[i] = itkPoint[i]; }
// Get neartest vertex
const float *match = kd->closest_to_pt( tmPoint, 10.0 * sqr( mesh.getMaximumEdgeLength() ) );
if(!match)
{
out_->SetPixel(itI.GetIndex(), -1);
} else {
int imatch = (match - (const float*) &(mesh.vertices[0][0])) / 3;
//std::cout << "Adjacent faces : " << mesh.adjacentfaces[imatch].size() << std::endl;
vector<int> adjFaces; adjFaces.clear();
vector<int>::iterator adjFacesIt;
// Check one-ring to get list of adjacent faces
for(size_t f = 0; f < mesh.adjacentfaces[imatch].size(); f++)
{
adjFaces.push_back(mesh.adjacentfaces[imatch][f]);
}
int fid = 0;
double minDist = LARGENUM;
for(adjFacesIt = adjFaces.begin(); adjFacesIt != adjFaces.end(); adjFacesIt++) {
point projPoint;
double dist = mesh.pointTriangleDistance(tmPoint, mesh.faces[*(adjFacesIt)], projPoint);
if(dist + EPS <= minDist) {
minDist = dist;
fid = *(adjFacesIt);
}
}
out_->SetPixel(itI.GetIndex(), fid);
adjFaces.clear();
}
} else {
out_->SetPixel(itI.GetIndex(), -1);
}
}
}
};
class TriMesh {
protected:
static bool read_helper(const char *filename, TriMesh *mesh);
public:
// Types
// One triangle: three vertex indices plus per-face attributes used by the
// fast-iterative-method (FIM) geodesic solver.
struct Face {
int v[3];
// inverse of the local propagation speed (see setSpeedType / SplitFace)
float speedInv;
// per-corner travel times used by the FIM solver
float T[3];
vec3 edgeLens; // edge length for 01, 12, 20
Face() {}
Face(const int &v0, const int &v1, const int &v2)
{
v[0] = v0; v[1] = v1; v[2] = v2;
}
Face(const int *v_)
{
v[0] = v_[0]; v[1] = v_[1]; v[2] = v_[2];
}
// index like an int[3] of vertex indices
int &operator[] (int i) { return v[i]; }
const int &operator[] (int i) const { return v[i]; }
operator const int * () const { return &(v[0]); }
operator const int * () { return &(v[0]); }
operator int * () { return &(v[0]); }
// position (0..2) of vertex v_ within this face, or -1 if absent
int indexof(int v_) const
{
return (v[0] == v_) ? 0 :
(v[1] == v_) ? 1 :
(v[2] == v_) ? 2 : -1;
}
};
// Axis-aligned bounding box.
// FIX(review): the "empty" state previously initialized `max` with
// std::numeric_limits<float>::min() -- the smallest POSITIVE float -- so
// growing an empty box with all-negative points left `max` wrong. It now
// uses -std::numeric_limits<float>::max() (the most negative float).
class BBox {
public:
point min, max;
bool valid;
// Construct as empty
BBox() : min(point(std::numeric_limits<float>::max(),
std::numeric_limits<float>::max(),
std::numeric_limits<float>::max())),
max(point(-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max())),
valid(false)
{}
// Initialize to one point or two points
BBox(const point &p) : min(p), max(p), valid(true)
{}
BBox(const point &min_, const point &max_) :
min(min_), max(max_), valid(true)
{}
// Mark invalid
void clear()
{
min = point(std::numeric_limits<float>::max(),
std::numeric_limits<float>::max(),
std::numeric_limits<float>::max());
max = point(-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max(),
-std::numeric_limits<float>::max());
valid = false;
}
// Return center point and (vector) diagonal
point center() const { return 0.5f * (min+max); }
vec size() const { return max - min; }
// Grow a bounding box to encompass a point
BBox &operator += (const point &p)
{ min.min(p); max.max(p); return *this; }
BBox &operator += (const BBox &b)
{ min.min(b.min); max.max(b.max); return *this; }
// The following appear to be necessary for Visual Studio,
// despite the fact that the operators shouldn't need
// to be friends...
friend const TriMesh::BBox operator + (const TriMesh::BBox &b, const point &p);
friend const TriMesh::BBox operator + (const point &p, const TriMesh::BBox &b);
friend const TriMesh::BBox operator + (const TriMesh::BBox &b1, const TriMesh::BBox &b2);
};
/*
struct BBox {
point min, max;
point center() const { return 0.5f * (min+max); }
vec size() const { return max - min; }
bool valid;
BBox() : valid(false)
{}
};
*/
// Bounding sphere (center + radius); `valid` is false until computed
// (see need_bsphere).
struct BSphere {
point center;
float r;
bool valid;
BSphere() : valid(false)
{}
};
// Enums
enum tstrip_rep { TSTRIP_LENGTH, TSTRIP_TERM };
enum { GRID_INVALID = -1 };
//enum speed_type { ONE = 0, CURVATURE, NOISE };
// The basics: vertices and faces
vector< point > vertices;
vector<Face> faces;
// Active speed function (ONE / CURVATURE / NOISE macros above)
int speedType;
// SHIREEN
// Face Index Map -- PM
// linear voxel index -> ids of mesh faces intersecting that voxel
typedef int VoxelIndexType;
map<VoxelIndexType, vector<int> > faceIndexMap;
// map< face, ...> didnot work
//map<Face, double > areaInvPerTri;
//map<Face, double > areaPerTri; // shireen
// map<Face,int> faceids;
// std::vector <double> areaPerTri;
// std::vector <double> areaInvPerTri;
// Store the size and index of the image domain (Filled in getFaceIndexMap)
float imageSpacing[3];
float imageOrigin[3];
int imageSize[3];
int imageIndex[3];
int number_of_voxels;
int number_of_subvoxels;
// end SHIREEN
// Triangle strips
vector<int> tstrips;
// Grid, if present
vector<int> grid;
int grid_width, grid_height;
// Other per-vertex properties
vector<Color> colors;
vector<float> confidences;
vector<unsigned> flags;
unsigned flag_curr;
// Computed per-vertex properties
vector<vec> normals;
vector<vec> pdir1, pdir2;
vector<float> curv1, curv2;
vector<float> abs_curv;
vector< Vec<4,float> > dcurv;
vector<vec> cornerareas;
vector<float> pointareas;
// KD-tree over the vertex positions (built by need_kdtree)
KDtree *kd;
double maxEdgeLength;
vector< map<unsigned int, float> > geodesicMap;
// per-vertex geodesic scratch buffer: allocated in InitializeAttributes,
// freed in CleanupAttributes
float *geodesic;
vector< vector<float> > features;
vector < vector<point> > featureGradients; //Praful - load more accurate gradient on vertices using volume for use in shapeworks
// Bounding structures
BBox bbox;
BSphere bsphere;
// Connectivity structures:
// For each vertex, all neighboring vertices
vector< vector<int> > neighbors;
// For each vertex, all neighboring faces
vector< vector<int> > adjacentfaces;
vector<double> radiusInscribe;
// Collect the vertex indices within two edge hops of v. Mirrors the
// original exactly: each one-ring neighbor is appended, followed by that
// neighbor's own one-ring, so v itself and duplicates may appear.
vector<int> getTwoNeighbors(int v){
vector<int> result;
for(size_t a = 0; a < this->neighbors[v].size(); a++){
// first hop
const int nb = this->neighbors[v][a];
result.push_back(nb);
// second hop: the neighbor's own neighbors
for(size_t b = 0; b < this->neighbors[nb].size(); b++){
result.push_back( this->neighbors[nb][b] );
}
}
return result;
}
// Precomputed acute one-ring faces per vertex (see need_oneringfaces)
vector< vector<Face> > vertOneringFaces;
// For each face, the three faces attached to its edges
// (for example, across_edge[3][2] is the number of the face
// that's touching the edge opposite vertex 2 of face 3)
vector<Face> across_edge;
// Per-vertex noise values used when speedType == NOISE (see need_noise)
vector<float> noiseOnVert;
// Return the active speed-function selector (ONE / CURVATURE / NOISE)
int getSpeedType(){
return speedType;
}
//int SPEEDTYPE;
// Compute all this stuff...
// Select the speed function used by the FIM geodesic solver.
// Accepts ONE or CURVATURE; anything else throws int(1).
// NOTE(review): NOISE is rejected here even though the speedInv switches in
// SplitFace/GetOneRing handle a NOISE case -- confirm whether NOISE should
// be accepted by this setter.
void setSpeedType(int st)
{
//ST = st;
speedType = st;
if(st == ONE){
//iMap = &geoIndex;
//dMap = &geoMap;
}
else if(st == CURVATURE){
//iMap = &adaptIndex;
//dMap = &adaptMap;
}
else{
std::cout << "Impossible SpeedType set" << std::endl;
throw(1); //exit(1337);
}
}
// Triangle-strip handling and lazy-computation entry points. Each need_*
// routine computes its property on first use (definitions live in the .cc).
void need_tstrips();
void convert_strips(tstrip_rep rep);
void unpack_tstrips();
void triangulate_grid();
// Ensure `faces` is populated, rebuilding from triangle strips or the
// range grid when necessary.
void need_faces()
{
if (!faces.empty())
return;
if (!tstrips.empty())
unpack_tstrips();
else if (!grid.empty())
triangulate_grid();
}
void need_faceedges();
void need_speed();
void need_noise(int nNoiseIter);
void need_oneringfaces();
void need_kdtree();
void need_maxedgelength();
void need_normals();
void need_pointareas();
void need_curvatures();
void need_abs_curvatures();
void need_dcurv();
void need_bbox();
void need_bsphere();
void need_neighbors();
void need_adjacentfaces();
void need_across_edge();
void need_meshinfo();
void need_Rinscribe();
// Input and output
static TriMesh *read(const char *filename);
bool write(const char *filename);
// Statistics
// XXX - Add stuff here
float feature_size();
// Useful queries
// XXX - Add stuff here
// True when vertex v lies on the mesh boundary: an interior vertex has as
// many adjacent faces as neighbors, a boundary vertex has one fewer.
bool is_bdy(int v)
{
if (neighbors.empty()) need_neighbors();
if (adjacentfaces.empty()) need_adjacentfaces();
return neighbors[v].size() != adjacentfaces[v].size();
}
// Normal of face f via the free function ::trinorm on the three corners.
// NOTE(review): ::trinorm is defined elsewhere (Vec.h); in trimesh2 it is
// the area-weighted (unnormalized) normal -- confirm before relying on unit
// length.
vec trinorm(int f)
{
if (faces.empty()) need_faces();
return ::trinorm(vertices[faces[f][0]], vertices[faces[f][1]],
vertices[faces[f][2]]);
}
// FIM: check angle for at a given vertex, for a given face
bool IsNonObtuse(int v, Face f)
{
int iV = f.indexof(v);
point A = this->vertices[v];
point B = this->vertices[f[(iV+1)%3]];
point C = this->vertices[f[(iV+2)%3]];
float a = dist(B,C);
float b = dist(A,C);
float c = dist(A,B);
float angA = 0.0; /* = acos( (b*b + c*c - a*a) / (2*b*c) )*/
if ((a > 0) && (b > 0) && (c > 0))// Manasi stack overflow
{// Manasi stack overflow
angA = acos( (b*b + c*c - a*a) / (2*b*c) );// Manasi stack overflow
}// Manasi stack overflow
//return ( angA - PI/2.0f < -0.00001 );
return ( angA < M_PI/2.0f );
}
// FIM: given a vertex, find an all-acute neighborhood of faces
// FIM: given vertex v and an obtuse face cf, "unfold" across the adjacent
// face nfAdj: form the two candidate faces (v1,v3,v4) and (v1,v2,v3), keep
// each one that is acute at v (appending it to acFaces with its speedInv and
// edge lengths filled in), and recurse across the next edge for any that is
// still obtuse. Recursion stops silently at holes (no across-edge face).
// FIX(review): the NOISE case of the FIRST switch was missing its `break;`
// and fell through to `default`, overwriting f1.speedInv with 1.0; the f2
// switch below already had the break, so the fall-through was a bug.
void SplitFace(vector<Face> &acFaces, int v, Face cf, int nfAdj/*, int currentVert*/)
{
// get all the four vertices in order
/* v1 v4
+-------+
\ . \
\ . \
\ . \
+-------+
v2 v3 */
int iV = cf.indexof(v); // get index of v in terms of cf
int v1 = v;
int v2 = cf[(iV+1)%3];
int v4 = cf[(iV+2)%3];
iV = this->faces[nfAdj].indexof(v2); // get index of v in terms of adjacent face
int v3 = this->faces[nfAdj][(iV+1)%3];
// create faces (v1,v3,v4) and (v1,v2,v3), check angle at v1
Face f1(v1,v3,v4);
Face f2(v1,v2,v3);
if (IsNonObtuse(v,f1))
{
// per-face inverse speed according to the active speed function
switch(speedType)
{
case CURVATURE:
f1.speedInv = ( abs_curv[f1[0]] +
abs_curv[f1[1]] +
abs_curv[f1[2]] ) / 3.0;
break;
case ONE:
f1.speedInv = 1.0;
break;
case NOISE:
f1.speedInv = (noiseOnVert[f1[0]] +
noiseOnVert[f1[1]] +
noiseOnVert[f1[2]]) / 3;
break;
default:
f1.speedInv = 1.0;
break;
}
// cache the three edge lengths of the accepted face
vec3 edge01 = (vec3)(vertices[f1[1]] - vertices[f1[0]]);
vec3 edge12 = (vec3)(vertices[f1[2]] - vertices[f1[1]]);
vec3 edge20 = (vec3)(vertices[f1[0]] - vertices[f1[2]]);
f1.edgeLens[0] =sqrt(edge01[0]*edge01[0] + edge01[1]*edge01[1] + edge01[2]*edge01[2]);
f1.edgeLens[1] =sqrt(edge12[0]*edge12[0] + edge12[1]*edge12[1] + edge12[2]*edge12[2]);
f1.edgeLens[2] =sqrt(edge20[0]*edge20[0] + edge20[1]*edge20[1] + edge20[2]*edge20[2]);
acFaces.push_back(f1);
}
else
{
// f1 still obtuse at v: keep unfolding across the edge opposite v2
int nfAdj_new = this->across_edge[nfAdj][this->faces[nfAdj].indexof(v2)];
if (nfAdj_new > -1)
{
SplitFace(acFaces,v,f1,nfAdj_new/*, currentVert*/);
}
else
{
// no face across this edge -- probably a hole; drop this branch
}
}
if (IsNonObtuse(v,f2))
{
switch(speedType)
{
case CURVATURE:
f2.speedInv = ( abs_curv[f2[0]] +
abs_curv[f2[1]] +
abs_curv[f2[2]] ) / 3.0;
break;
case ONE:
f2.speedInv = 1.0;
break;
case NOISE:
f2.speedInv = (noiseOnVert[f2[0]] +
noiseOnVert[f2[1]] +
noiseOnVert[f2[2]]) / 3;
break;
default:
f2.speedInv = 1.0;
break;
}
vec3 edge01 = (vec3)(vertices[f2[1]] - vertices[f2[0]]);
vec3 edge12 = (vec3)(vertices[f2[2]] - vertices[f2[1]]);
vec3 edge20 = (vec3)(vertices[f2[0]] - vertices[f2[2]]);
f2.edgeLens[0] =sqrt(edge01[0]*edge01[0] + edge01[1]*edge01[1] + edge01[2]*edge01[2]);
f2.edgeLens[1] =sqrt(edge12[0]*edge12[0] + edge12[1]*edge12[1] + edge12[2]*edge12[2]);
f2.edgeLens[2] =sqrt(edge20[0]*edge20[0] + edge20[1]*edge20[1] + edge20[2]*edge20[2]);
acFaces.push_back(f2);
}
else
{
// f2 still obtuse at v: keep unfolding across the edge opposite v4
int nfAdj_new = this->across_edge[nfAdj][this->faces[nfAdj].indexof(v4)];
if (nfAdj_new > -1)
{
SplitFace(acFaces,v,f2,nfAdj_new/*,currentVert*/);
}
else
{
// no face across this edge -- probably a hole; drop this branch
}
}
}
// FIM: one ring function
// FIM: return an all-acute set of faces around vertex v. Each adjacent face
// that is already non-obtuse at v is kept as-is; obtuse ones are recursively
// unfolded via SplitFace into virtual acute faces.
vector<Face> GetOneRing(int v/*, int currentVert*/)
{
// make sure we have the across-edge map
if (this->across_edge.empty())
this->need_across_edge();
// variables required
vector<Face> oneRingFaces;
vector<Face> t_faces;
// get adjacent faces
int naf = this->adjacentfaces[v].size();
if (!naf)
{
std::cout << "vertex " << v << " has 0 adjacent faces..." << std::endl;
}
else
{
for (int af = 0; af < naf; af++)
{
Face cf = this->faces[adjacentfaces[v][af]];
t_faces.clear();
if(IsNonObtuse(v,cf))// check angle: if non-obtuse, return existing face
{
t_faces.push_back(cf);
}
else
{
// obtuse at v: unfold across the edge opposite v
int nfae = this->across_edge[this->adjacentfaces[v][af]][cf.indexof(v)];
if (nfae > -1)
{
SplitFace(t_faces,v,cf,nfae/*,currentVert*/);// if obtuse, split face till we get all acute angles
}
else
{
// no face across the edge -- probably a hole; skip
}
}
// accumulate the (possibly virtual) acute faces for this adjacency
for (int tf = 0; tf < t_faces.size(); tf++)
{
oneRingFaces.push_back(t_faces[tf]);
}
}
}
return oneRingFaces;
}
// FIM: initialize attributes
//typedef std::<int> ListType;
// FIM: allocate and reset the per-vertex geodesic buffer (all LARGENUM),
// zero the given seed vertices, and make sure the connectivity structures
// the solver needs are computed. Pair every call with CleanupAttributes.
// NOTE(review): calling this twice without CleanupAttributes in between
// leaks the previous `geodesic` array -- confirm callers always pair them.
void InitializeAttributes(int currentVert , std::vector<int> seeds = vector<int>() )
{
int nv = this->vertices.size();
this->geodesic = new float[nv];
for(int v= 0; v < nv; v++){
geodesic[v] = LARGENUM;
}
// initialize seed points if present...
if (!seeds.empty()){
int ns = seeds.size();
for (int s = 0; s < ns; s++){
geodesic[seeds[s]] = 0;
}
}
// pre-compute faces, normals, and other per-vertex properties that may be needed
this->need_neighbors();
this->need_normals();
this->need_adjacentfaces();
this->need_across_edge();
this->need_faces();
/* HOW DO WE DO THIS USING NEW GEODESIC DATA STRUCTURE?
// for all faces: initialize per-vertex travel time and face-speed
int nf = this->faces.size();
for (int f = 0; f < nf; f++)
{
Face cf = this->faces[f];
// travel time
faces[f].T[0] = this->vertT[currentVert][cf[0]];
faces[f].T[1] = this->vertT[currentVert][cf[1]];
faces[f].T[2] = this->vertT[currentVert][cf[2]];
}
*/
}
// FIM: Remove data lingering from computation
void CleanupAttributes(int currentVert)
{
delete [] this->geodesic;
}
/* Prateep */
// Dump faceIndexMap as text, one voxel per line:
// "<voxelLinearIndex>: f0 f1 f2 ...". Readable back by ReadFaceIndexMap.
void WriteFaceIndexMap(const char* outfilename)
{
std::ofstream fout(outfilename, std::ios::out);
map<VoxelIndexType, vector<int> >::iterator faceIndexMapIt;
vector<int>::iterator faceIndexIt;
for(faceIndexMapIt = this->faceIndexMap.begin(); faceIndexMapIt != this->faceIndexMap.end(); faceIndexMapIt++)
{
fout << (int) (*faceIndexMapIt).first << ": ";
for(faceIndexIt = faceIndexMapIt->second.begin(); faceIndexIt != faceIndexMapIt->second.end(); faceIndexIt++) {
fout << (*faceIndexIt) << " ";
}
fout << std::endl;
}
fout.close();
}
/* Prateep */
/* Prateep */
// Load a face-index map written by WriteFaceIndexMap; each line has the form
// "<voxelLinearIndex>: f0 f1 f2 ...". Entries are appended into
// this->faceIndexMap.
// FIX(review): replaced the `while(infile){ getline(...); ... }` pattern,
// which ran the loop body once more after the final getline failed (parsing
// a stale/empty line), with a loop that tests getline directly; blank or
// malformed lines are now skipped explicitly instead of reading an
// uninitialized index.
void ReadFaceIndexMap(const char* infilename)
{
std::ifstream infile(infilename);
if(!infile.is_open())
{
std::cout << "File Not Found:" << infilename << std::endl;
}
else
{
std::cout << "reading face indices from " << infilename << std::endl;
std::string line;
while(getline(infile, line))
{
std::stringstream ss(line);
VoxelIndexType index;
char delim;
if(!(ss >> index >> delim)) {
continue; // blank or malformed line
}
int fid;
while(ss >> fid) {
this->faceIndexMap[index].push_back(fid);
}
}
infile.close();
}
}
// Drop all voxel -> face-id associations (frees the map's memory).
void ClearFaceIndexMap()
{
this->faceIndexMap.clear();
}
/* Prateep */
/* Prateep */
// Longest edge over all faces, scanning the cached per-face edgeLens
// (01, 12, 20). Returns 0 when there are no faces. Note: accumulates in
// double and narrows to float on return, exactly like the original.
float getMaximumEdgeLength()
{
double longest = 0.0;
for(unsigned int f = 0; f < this->faces.size(); f++)
{
for(int e = 0; e < 3; e++)
{
const double len = this->faces[f].edgeLens[e];
if(len > longest) {
longest = len;
}
}
}
return longest;
}
/* Prateep */
// Physical point -> integer voxel coordinates, using the mesh's stored image
// origin/spacing. Truncates (casts) rather than rounds, and assumes an
// axis-aligned image (no direction matrix).
void physicalPointToXYZ(point x, VoxelIndexType* imageX) // physical to image coordinates
{
imageX[0] = static_cast<VoxelIndexType> ( (x[0] - this->imageOrigin[0]) / this->imageSpacing[0] );
imageX[1] = static_cast<VoxelIndexType> ( (x[1] - this->imageOrigin[1]) / this->imageSpacing[1] );
imageX[2] = static_cast<VoxelIndexType> ( (x[2] - this->imageOrigin[2]) / this->imageSpacing[2] );
}
// Overload taking explicit origin/spacing (e.g. supervoxel grids).
void physicalPointToXYZ(point x, VoxelIndexType* imageX, float imageOrigin[3], float imageSpacing[3])
{
imageX[0] = static_cast<VoxelIndexType> ( (x[0] - imageOrigin[0]) / imageSpacing[0] );
imageX[1] = static_cast<VoxelIndexType> ( (x[1] - imageOrigin[1]) / imageSpacing[1] );
imageX[2] = static_cast<VoxelIndexType> ( (x[2] - imageOrigin[2]) / imageSpacing[2] );
}
/* Prateep */
/* Prateep */
// Voxel coordinates -> physical point (voxel corner, not center), using the
// mesh's stored image origin/spacing; inverse of physicalPointToXYZ up to
// truncation.
point indexToPhysicalPoint(VoxelIndexType* imageX)
{
point ret;
ret[0] = static_cast<float> ( imageX[0] * this->imageSpacing[0] + this->imageOrigin[0] );
ret[1] = static_cast<float> ( imageX[1] * this->imageSpacing[1] + this->imageOrigin[1] );
ret[2] = static_cast<float> ( imageX[2] * this->imageSpacing[2] + this->imageOrigin[2] );
return ret;
}
/* Prateep */
// Overload taking explicit origin/spacing.
point indexToPhysicalPoint(VoxelIndexType* imageX, float origin[3], float spacing[3])
{
point ret;
ret[0] = static_cast<float> ( imageX[0] * spacing[0] + origin[0] );
ret[1] = static_cast<float> ( imageX[1] * spacing[1] + origin[1] );
ret[2] = static_cast<float> ( imageX[2] * spacing[2] + origin[2] );
return ret;
}
/* Prateep */
/* Prateep */
// True when `ind` lies inside the stored image domain on every axis.
// NOTE(review): upper bound compares against imageSize, not
// imageIndex + imageSize -- same as the original; confirm imageIndex is
// always {0,0,0}.
bool isInsideImageBuffer(itk::Image<int,3>::IndexType ind)
{
for(int d = 0; d < 3; d++)
{
if(ind[d] < this->imageIndex[d] || ind[d] >= this->imageSize[d]) {
return false;
}
}
return true;
}
// SHIREEN
// SHIREEN
// Physical point -> linear (flattened) voxel index in the stored image
// domain; composition of physicalPointToXYZ and indexToLinearIndex.
VoxelIndexType physicalPointToLinearIndex(point x)
{
VoxelIndexType imageX[3];
this->physicalPointToXYZ(x,imageX,this->imageOrigin, this->imageSpacing);
VoxelIndexType linearIndX = this->indexToLinearIndex(imageX, this->imageSize);
return linearIndX;
}
// Overload taking an explicit origin/spacing/size (e.g. supervoxel grids).
VoxelIndexType physicalPointToLinearIndex(point x, float imageOrigin[3], float imageSpacing[3], int imageSize[3])
{
VoxelIndexType imageX[3];
this->physicalPointToXYZ(x,imageX,imageOrigin,imageSpacing);
VoxelIndexType linearIndX = this->indexToLinearIndex(imageX, imageSize);
return linearIndX;
}
// Linear index -> (x, y, z) voxel coordinates for an x-fastest layout.
void linearIndexToXYZ(VoxelIndexType linearIndX, VoxelIndexType* imageX, int imageSize[3])
{
// convert linear index to r, c, s
imageX[2] = linearIndX / (imageSize[0]*imageSize[1]); // slice value (Note: integer division)
linearIndX %= (imageSize[0]*imageSize[1]);
imageX[1] = linearIndX / imageSize[0]; // column value (Note: integer division)
imageX[0] = linearIndX % imageSize[0]; // row value
}
// Overload using the mesh's stored imageSize.
void linearIndexToXYZ(VoxelIndexType linearIndX, VoxelIndexType* imageX)
{
// convert linear index to r, c, s
imageX[2] = linearIndX / (imageSize[0]*imageSize[1]); // slice value (Note: integer division)
linearIndX %= (imageSize[0]*imageSize[1]);
imageX[1] = linearIndX / imageSize[0]; // column value (Note: integer division)
imageX[0] = linearIndX % imageSize[0]; // row value
}
// (x, y, z) voxel coordinates -> linear index (x-fastest layout); inverse of
// linearIndexToXYZ.
VoxelIndexType indexToLinearIndex(VoxelIndexType* imageX, int imageSize[3])
{
VoxelIndexType linearIndX = imageX[0] + imageX[1] * imageSize[0] + imageX[2] * imageSize[0] * imageSize[1];
return linearIndX;
}
// Overload using the mesh's stored imageSize.
VoxelIndexType indexToLinearIndex(VoxelIndexType* imageX)
{
VoxelIndexType linearIndX = imageX[0] + imageX[1] * imageSize[0] + imageX[2] * imageSize[0] * imageSize[1];
return linearIndX;
}
// Linear voxel index -> physical point, for an explicit image geometry.
point linearIndexToPhysicalPoint(VoxelIndexType linearIndX, float imageOrigin[3], float imageSpacing[3], int imageSize[3])
{
VoxelIndexType imageX[3];
this->linearIndexToXYZ(linearIndX, imageX, imageSize);
point p = this->indexToPhysicalPoint(imageX, imageOrigin, imageSpacing);
return p;
}
// Overload using the mesh's stored image geometry.
point linearIndexToPhysicalPoint(VoxelIndexType linearIndX)
{
VoxelIndexType imageX[3];
this->linearIndexToXYZ(linearIndX, imageX, imageSize);
point p = this->indexToPhysicalPoint(imageX, imageOrigin, imageSpacing);
return p;
}
// end SHIREEN
/* Prateep
* http://www.geometrictools.com/Documentation/DistancePoint3Triangle3.pdf
* ^t
* \ |
* \reg2|
* \ |
* \ |
* \ |
* \|
* *P2
* |\
* | \
* reg3 | \ reg1
* | \
* |reg0\
* | \
* | \ P1
* -------*-------*------->s
* |P0 \
* reg4 | reg5 \ reg6
*/
// Distance from point P to triangle `face` (Eberly's region-based method;
// see the diagram above). The triangle is parameterized T(s,t) =
// B + s*E0 + t*E1 with s >= 0, t >= 0, s+t <= 1; the minimizing (s,t) is
// found by classifying the unconstrained minimum into one of 7 regions and
// clamping to the nearest edge/vertex. On return PP holds the closest point
// on the triangle; the (float) distance is returned as a double.
double pointTriangleDistance(point P, Face face, point& PP)
{
// rewrite vertices in normal form
point B = this->vertices[face.v[0]];
point E0 = this->vertices[face.v[1]] - B;
point E1 = this->vertices[face.v[2]] - B;
point D = B - P;
// quadratic form Q(s,t) = a s^2 + 2b st + c t^2 + 2d s + 2e t + f
float a = E0 DOT E0;
float b = E0 DOT E1;
float c = E1 DOT E1;
float d = E0 DOT D;
float e = E1 DOT D;
float f = D DOT D;
float det = a*c - b*b;
float s = b*e - c*d;
float t = b*d - a*e;
float distSqr = 0.0f;
if(s+t <= det) {
if(s < 0) {
if(t < 0) {
// region 4
if(d < 0) {
t = 0;
if(-d >= a) {
s = 1.0;
distSqr = a + 2.0f*d + f;
} else {
s = -d/a;
distSqr = d*s + f;
}
} else {
s = 0.0f;
if(e >= 0.0f) {
t = 0.0f;
distSqr = f;
} else {
if(-e >= c) {
t = 1.0f;
distSqr = c + 2.0f*e + f;
} else {
t = -e/c;
distSqr = e*t + f;
}
}
} // end of region 4
} else {
// region 3
s = 0.0f;
if(e >= 0.0f) {
t = 0.0f;
distSqr = f;
} else {
if(-e >= c) {
t = 1.0f;
distSqr = c + 2.0f*e + f;
} else {
t = -e/c;
distSqr = e*t + f;
}
}
} // end of region 3
} else {
if(t < 0.0f) {
// region 5
t = 0.0f;
if (d >= 0.0f) {
s = 0.0f;
distSqr = f;
} else {
if(-d >= a) {
s = 1.0f;
distSqr = a + 2*d + f;
} else {
s = -d/a;
distSqr = d*s + f;
}
}
// end of region 5
} else {
// region 0: unconstrained minimum lies inside the triangle
float invDet = 1.0f/det;
s *= invDet;
t *= invDet;
distSqr = s * (a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
// end of region 0
}
}
} else {
if(s < 0.0f) {
// region 2
float tmp0 = b+d;
float tmp1 = c+e;
if(tmp1 > tmp0) {
float numer = tmp1 - tmp0;
float denom = a - 2*b + c;
if(numer >= denom) {
s = 1.0f;
t = 0.0f;
distSqr = a + 2*d + f;
} else {
s = numer / denom;
t = 1.0 - s;
distSqr = s*(a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
}
} else {
s = 0.0f;
if(tmp1 <= 0.0f) {
t = 1.0f;
distSqr = c + 2*e + f;
} else {
if(e >= 0.0f) {
t = 0.0f;
distSqr = f;
} else {
t = -e/c;
distSqr = e*t+f;
}
}
}
// end of region 2
} else {
if(t < 0) {
// region 6
float tmp0 = b + e;
float tmp1 = a + d;
if(tmp1 > tmp0) {
float numer = tmp1 - tmp0;
float denom = a-2*b+c;
if(numer >= denom) {
t = 1.0f;
s = 0.0f;
distSqr = c + 2*e + f;
} else {
t = numer / denom;
s = 1.0 - t;
distSqr = s*(a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
}
} else {
t = 0.0f;
if(tmp1 <= 0.0f) {
s = 1.0f;
distSqr = a + 2*d + f;
} else {
if(d >= 0.0f) {
s = 0.0f;
distSqr = f;
} else {
s = -d/a;
distSqr = d*s + f;
}
}
}
// end of region 6
} else {
// region 1
float numer = c + e - b - d;
if(numer <= 0) {
s = 0.0f;
t = 1.0f;
distSqr = c + 2*e + f;
} else {
float denom = a - 2*b + c;
if(numer >= denom) {
s = 1.0f;
t = 0.0f;
distSqr = a + 2*d + f;
} else {
s = numer / denom;
t = 1.0f-s;
distSqr = s*(a*s + b*t + 2*d) + t*(b*s + c*t + 2*e) + f;
}
}
// end of region 1
}
}
}
// guard against tiny negative values from floating-point cancellation
if (distSqr < 0.0f) distSqr = 0.0f;
float dist = std::sqrt(distSqr);
PP = B + s * E0 + t * E1;
return dist;
}
/* Prateep */
/* Prateep */
// Classify point P against the parameterization of `face`, returning the
// Voronoi-style region index (0-6) used by pointTriangleDistance (see the
// diagram above that routine). Same arithmetic as the distance routine, but
// only the region label is computed.
int pointTriangleRegion(point P, Face face)
{
// triangle in parametric form: T(s,t) = B + s*E0 + t*E1
point B = this->vertices[face.v[0]];
point E0 = this->vertices[face.v[1]] - B;
point E1 = this->vertices[face.v[2]] - B;
point D = B - P;
float a = E0 DOT E0;
float b = E0 DOT E1;
float c = E1 DOT E1;
float d = E0 DOT D;
float e = E1 DOT D;
float det = a*c - b*b;
// unconstrained minimizer, scaled by det
float s = b*e - c*d;
float t = b*d - a*e;
if(s+t <= det) {
if(s < 0) {
return (t < 0) ? 4 : 3;
}
return (t < 0.0f) ? 5 : 0;
}
if(s < 0.0f) {
return 2;
}
return (t < 0) ? 6 : 1;
}
/* Prateep */
// Build faceIndexMap from a narrow-band mask: run MapFunctorKDtree over the
// (possibly subvoxel-resolution) mask to label each voxel with its nearest
// face id, then bin those labels back into original-resolution voxels.
// Also stores the original-DT image origin/spacing/size on this mesh.
void generateFaceIndexMapViaKDtree(itk::Image<PixelType, 3>::ConstPointer narrowBand, int number_of_subvoxels = 1,
int num_threads = 1, std::string debug_prefix = "")
{
if( !this->kd ) this->need_kdtree();
this->faceIndexMap.clear();
this->number_of_subvoxels = number_of_subvoxels;
const double eps = 1e-10;
// inline DeepCopy
itk::Image<PixelType, 3>::Pointer OutputImage = itk::Image<PixelType, 3>::New();
OutputImage->SetRegions( narrowBand->GetLargestPossibleRegion() );
OutputImage->Allocate();
OutputImage->SetOrigin( narrowBand->GetOrigin() );
OutputImage->SetSpacing( narrowBand->GetSpacing() );
OutputImage->SetDirection( narrowBand->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > narrowBandIt(narrowBand, narrowBand->GetLargestPossibleRegion());
itk::ImageRegionIterator< itk::Image<PixelType,3> > OutputImageIt(OutputImage, narrowBand->GetLargestPossibleRegion() );
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
while(!narrowBandIt.IsAtEnd() ) {
OutputImageIt.Set( narrowBandIt.Get() );
++narrowBandIt;
++OutputImageIt;
}
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
std::cout << "FidsViaKDTree. Starting functor ...\n";
itk::TimeProbe clock;
clock.Start();
{
typedef MapFunctorKDtree< itk::Image<PixelType,3>, itk::Image<PixelType,3>, TriMesh > FType;
FType functor(OutputImage);
// We need kdtree, so input vertices, faces and adjacentfaces. Rest are just function calls for TriMesh::. So, other members need not be past.
functor.mesh = *this;
functor.mesh.faces = this->faces;
functor.mesh.vertices = this->vertices;
functor.mesh.adjacentfaces = this->adjacentfaces;
functor.setKD();
bambam::map< itk::Image<PixelType, 3>, itk::Image<PixelType,3>, FType>::run(narrowBand, functor, num_threads);
}
clock.Stop();
std::cout << "Time taken (functor)\n";
std::cout << "Mean : " << clock.GetMean() << std::endl;
std::cout << "Total : " << clock.GetTotal() << std::endl;
std::cout << "---------------------------\n";
if(debug_prefix.compare("") != 0)
{
// debug output: per-voxel face-id image plus distance map
itk::ImageFileWriter< itk::Image<PixelType,3> >::Pointer writer = itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
std::string f = debug_prefix + ".faceInd.nrrd";
writer->SetFileName( f.c_str() );
writer->SetInput( OutputImage );
writer->SetUseCompression(true);
writer->Update();
saveFidsViaKDtreeDistanceMap(OutputImage, debug_prefix);
}
// Collect values in faceIndexMap
itk::Image<PixelType, 3>::IndexType index = OutputImage->GetLargestPossibleRegion().GetIndex();
itk::Image<PixelType, 3>::SizeType size = OutputImage->GetLargestPossibleRegion().GetSize();
itk::Image<PixelType, 3>::PointType origin = OutputImage->GetOrigin();
itk::Image<PixelType, 3>::SpacingType spacing = OutputImage->GetSpacing();
// Store origin of image domain
this->imageOrigin[0] = origin[0];
this->imageOrigin[1] = origin[1];
this->imageOrigin[2] = origin[2];
// Store spacing of image domain of the original DT
this->imageSpacing[0] = spacing[0] * number_of_subvoxels;
this->imageSpacing[1] = spacing[1] * number_of_subvoxels;
this->imageSpacing[2] = spacing[2] * number_of_subvoxels;
// Store size of image domain of the original DT
this->imageSize[0] = size[0] / number_of_subvoxels;
this->imageSize[1] = size[1] / number_of_subvoxels;
this->imageSize[2] = size[2] / number_of_subvoxels;
// collect the results from the subvoxels to the original voxels
for(unsigned int i = 0; i < size[0]; i++) { // X
for(unsigned int j = 0; j < size[1]; j++) { // Y
for(unsigned int k = 0; k < size[2]; k++) { // Z
itk::Image<PixelType, 3>::IndexType idx;
idx[0] = i; idx[1] = j; idx[2] = k;
if(OutputImage->GetPixel(idx) > -1)
{
// map the subvoxel back to its parent voxel and append the face id
itk::Image<PixelType, 3>::IndexType idx2;
idx2[0] = (VoxelIndexType)floor((float)i / (float)number_of_subvoxels);
idx2[1] = (VoxelIndexType)floor((float)j / (float)number_of_subvoxels);
idx2[2] = (VoxelIndexType)floor((float)k / (float)number_of_subvoxels);
VoxelIndexType idx1 = idx2[0] + idx2[1] * this->imageSize[0] + idx2[2] * this->imageSize[0] * this->imageSize[1];
this->faceIndexMap[idx1].push_back( OutputImage->GetPixel(idx) );
}
}
}
}
std::cout << "\nLength of face Index Map " << this->faceIndexMap.size() << std::endl;
// NOTE(review): for a nonempty debug_prefix this condition is also true, so
// the distance map is saved TWICE (here and in the block above) -- confirm.
if(debug_prefix.compare("") > 0)
{
std::cout << "Now saving distance map...";
saveFidsViaKDtreeDistanceMap(OutputImage, debug_prefix);
}
}
/* Prateep */
// Builds this->faceIndexMap : voxel linear index -> list of candidate face ids.
// Pipeline:
//   1) bucket mesh faces into coarse "super voxels" by testing each face's
//      distance to the super-voxel ball (radius derived from the distance
//      transform value at the super voxel),
//   2) run a parallel MapFunctor (bambam::map) that writes, for every
//      narrow-band subvoxel, the id of its closest candidate face into
//      OutputImage,
//   3) collapse OutputImage back to the original voxel resolution (including
//      a +/-2 voxel neighborhood) into this->faceIndexMap, skipping
//      duplicate face ids to keep the .fids file small.
// Parameters:
//   narrowBand          - narrow-band mask at subvoxel resolution
//   scaledDT            - distance transform sampled at the same subvoxel resolution
//   q, ldelta           - ball radius = q + sqrt(ldelta^2 + sigma^2), sigma read from scaledDT
//   number_of_subvoxels - subvoxels per original voxel (per axis)
//   number_of_voxels    - original voxels per super voxel (per axis)
//   radiusFactor        - divides the ball radius (1.0 => keep full radius / all neighbors)
//   num_threads         - thread count handed to the bambam::map runner
//   debug_prefix/debug_suffix, saveFaceIndMap - optional debug volume dumps
// Side effects: resets faceIndexMap; overwrites imageOrigin, imageSpacing,
// imageSize, imageIndex, number_of_subvoxels and number_of_voxels.
void generateFaceIndexMapViaSuperVoxel(itk::Image<PixelType, 3>::ConstPointer narrowBand, itk::Image<PixelType, 3>::Pointer scaledDT,
float q, float ldelta,
int number_of_subvoxels = 1, int number_of_voxels = 1, float radiusFactor = 1.0, // search for all neighbors
int num_threads = 1, std::string debug_prefix = "", std::string debug_suffix = "", bool saveFaceIndMap = false)
{
this->faceIndexMap.clear();
this->number_of_subvoxels = number_of_subvoxels;
this->number_of_voxels = number_of_voxels;
// inline DeepCopy
itk::Image<int, 3>::Pointer OutputImage = itk::Image<int, 3>::New();
OutputImage->SetRegions( narrowBand->GetLargestPossibleRegion() );
OutputImage->Allocate();
OutputImage->SetOrigin( narrowBand->GetOrigin() );
OutputImage->SetSpacing( narrowBand->GetSpacing() );
OutputImage->SetDirection( narrowBand->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > narrowBandIt(narrowBand, narrowBand->GetLargestPossibleRegion());
//itk::ImageRegionIteratorWithIndex< itk::Image<int,3> > narrowBandItShared(narrowBand, narrowBand->GetLargestPossibleRegion());
itk::ImageRegionIterator< itk::Image<int,3> > OutputImageIt(OutputImage, narrowBand->GetLargestPossibleRegion() );
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
// copy the narrow-band mask into the (int-typed) output image
while(!narrowBandIt.IsAtEnd() ) {
OutputImageIt.Set( narrowBandIt.Get() );
++narrowBandIt;
++OutputImageIt;
}
narrowBandIt.GoToBegin();
//narrowBandItShared.GoToBegin();
OutputImageIt.GoToBegin();
// NOTE(review): thread count hard-coded to 6 here, unlike the serial variant
// which uses NUM_THREADS - confirm this is intentional.
#ifdef MP_USE_OPENMP
omp_set_num_threads(6);
#endif
int nf = this->faces.size();
itk::Image<int, 3>::PointType ori = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType sp = OutputImage->GetSpacing();
itk::Image<int, 3>::SizeType si = OutputImage->GetLargestPossibleRegion().GetSize();
// super-voxel grid geometry: same origin as the subvoxel image, spacing and
// size scaled by number_of_voxels * number_of_subvoxels
float supVoxelOrigin[3];
supVoxelOrigin[0] = (float) (ori[0]);
supVoxelOrigin[1] = (float) (ori[1]);
supVoxelOrigin[2] = (float) (ori[2]);
float supVoxelSpacing[3];
supVoxelSpacing[0] = (float) (sp[0] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[1] = (float) (sp[1] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[2] = (float) (sp[2] * number_of_voxels * number_of_subvoxels);
int supVoxelSize[3];
supVoxelSize[0] = (int) ((si[0] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[1] = (int) ((si[1] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[2] = (int) ((si[2] / (float)number_of_subvoxels) / (float)number_of_voxels);
// super voxel face list
map<int, vector<int> > superVoxelFaceList;
// 1. Compute \sigma --> maximum physical distance for each supVoxel from isosurface
//int sigmaSV = -LARGENUM;
// // // shireen debug
// VoxelIndexType vox_ind = 2524975; //2574480; // vox_ind == 2524975 || vox_ind == 8340369)
// // VoxelIndexType voxX[3], voxXX[3];
// // this->linearIndexToXYZ(vox_ind, voxX, this->imageSize);
// // VoxelIndexType vox_ind22 = this->indexToLinearIndex(voxX, this->imageSize);
// // point vox_p = this->indexToPhysicalPoint(voxX, this->imageOrigin, this->imageSpacing);
// // this->physicalPointToXYZ(vox_p, voxXX, this->imageOrigin, this->imageSpacing);
// // VoxelIndexType vox_ind2 = this->physicalPointToLinearIndex(vox_p, this->imageOrigin, this->imageSpacing, this->imageSize);
// point vox_p = linearIndexToPhysicalPoint(vox_ind, this->imageOrigin, this->imageSpacing, this->imageSize);
// // VoxelIndexType vox_ind2;
// // vox_ind2 = this->physicalPointToLinearIndex(vox_p, this->imageOrigin, this->imageSpacing, this->imageSize);
// VoxelIndexType imageX[3];
// this->physicalPointToXYZ(vox_p, imageX, supVoxelOrigin, supVoxelSpacing);
// VoxelIndexType imageX_[3];
// this->physicalPointToXYZ(vox_p, imageX_, this->imageOrigin, this->imageSpacing);
// int iter2 = 0;
// for(int i = 0; i < supVoxelSize[0]; i++) {
// for(int j = 0; j < supVoxelSize[1]; j++) {
// for(int k = 0; k < supVoxelSize[2]; k++) {
// if(i == imageX[0] && j == imageX[1] && k ==imageX[2])
// {
// int hihi = 0;
// }
// iter2++;
// }
// }
// }
// // end shireen debug
// bucket every face into each super voxel whose search ball it intersects
int iter = 0;
for(int i = 0; i < supVoxelSize[0]; i++) {
for(int j = 0; j < supVoxelSize[1]; j++) {
for(int k = 0; k < supVoxelSize[2]; k++) {
// the super voxel index
VoxelIndexType p[3];
p[0] = i; p[1] = j; p[2] = k;
// converting the supervoxel index to a physical point in space
point supV = this->indexToPhysicalPoint(p, supVoxelOrigin, supVoxelSpacing);
// poit to itkpoint
itk::Image<int, 3>::PointType supVp;
for(int ii = 0; ii < 3; ii++) supVp[ii] = supV[ii];
// Get the ball center
point supVCent = supV;
for(int ii = 0; ii < 3; ii++) {
supVCent[ii] += (float) q/2.0;
}
// // shireen debug
// if(imageX[0] == i && imageX[1] == j && imageX[2] == k)
// {
// int test = 0;
// }
// VoxelIndexType sind = this->physicalPointToLinearIndex(vox_p, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// VoxelIndexType sind2 = this->physicalPointToLinearIndex(supV , supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// if (sind == sind2)
// int test = 0;
// // end shireen debug
// get the subvoxel index of the supervoxel point (where is it lying in the scaled distance transform
itk::Image<PixelType,3>::IndexType supVInd;
//for(int ii = 0; ii < 3; ii++) supVInd[ii] = supV[ii];
scaledDT->TransformPhysicalPointToIndex(supVp, supVInd); // shireen, refer to the center not the corner
// get the distance (encoded in the distance transform) of this supervoxel from the mesh surface
float sigma = scaledDT->GetPixel(supVInd); // shireen debug float not int
// isotropic scaling
sigma *= scaledDT->GetSpacing()[0] * this->number_of_subvoxels;
// 2. Get ball radius
float ballRadiusSV = q + std::sqrt(ldelta*ldelta + sigma*sigma);
ballRadiusSV /= radiusFactor;
VoxelIndexType ind = this->physicalPointToLinearIndex(supV, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// 3. Get supVoxelFaceList
// #pragma omp parallel
{
// #pragma omp for
// brute-force scan over all faces: any face within the ball is a candidate
for(int f = 0; f < nf; f++) {
// if (f == 1536 || f == 4608)
// int tst = 0;
//std::cout << "Face # : " << f << std::endl;
point pp;
double d = this->pointTriangleDistance(supVCent, this->faces[f], pp);
if(d < ballRadiusSV + EPS)
{
//VoxelIndexType ind = this->physicalPointToLinearIndex(supV, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
superVoxelFaceList[ind].push_back(f);
}
}
}
iter++;
if(superVoxelFaceList[ind].size() != 0)
std::cout << "iter : " << iter << " ,length(Facelist) = " << superVoxelFaceList[ind].size() << "\n";
//else
// std::cout << "iter : " << iter << "ZERO!!!!!!!!! "<< "\n";
}
}
}
/*
std::string outfilename = "tmpSize.txt";
std::ofstream fout( outfilename.c_str(), std::ios::out);
for(map<int, set<int> >::iterator it = superVoxelFaceList.begin(); it != superVoxelFaceList.end(); it++)
{
fout << (int) (*it).first << ": ";
// for(set<int>::iterator it1 = it->second.begin(); it1 != it->second.end(); it1++) {
// fout << (*it1) << " ";
// }
fout << (int) (*it).second.size();
fout << std::endl;
}
fout.close();
*/
// run the parallel per-voxel closest-face assignment over the narrow band;
// results (face ids, or -1 outside the band) land in OutputImage
std::cout << "Starting functor ...\n";
itk::TimeProbe clock;
clock.Start();
{
typedef MapFunctor< itk::Image<PixelType,3>, itk::Image<int,3>, TriMesh > FType;
FType functor(OutputImage);
functor.superVoxelFaceList = superVoxelFaceList;
for(int i = 0; i < 3; i++) {
functor.supVoxelOrigin[i] = supVoxelOrigin[i];
functor.supVoxelSize[i] = supVoxelSize[i];
functor.supVoxelSpacing[i] = supVoxelSpacing[i];
}
// We only need faces. Rest are just function calls for TriMesh::. So, other members need not be past.
functor.mesh = *this;
functor.mesh.faces = this->faces;
bambam::map< itk::Image<PixelType, 3>, itk::Image<int,3>, FType>::run(narrowBand, functor, num_threads);
}
clock.Stop();
std::cout << "Time taken (functor)\n";
std::cout << "Mean : " << clock.GetMean() << std::endl;
std::cout << "Total : " << clock.GetTotal() << std::endl;
std::cout << "---------------------------\n";
if(saveFaceIndMap)
{
if(debug_prefix.compare ("") != 0)
{
itk::ImageFileWriter< itk::Image<int,3> >::Pointer writer = itk::ImageFileWriter< itk::Image<int, 3> >::New();
//std::stringstream ss; ss << radiusFactor;
//std::stringstream sp; sp << this->imageSpacing[0];
std::string f = debug_prefix + ".faceInd" + debug_suffix + ".nrrd";
writer->SetFileName( f.c_str() );
writer->SetInput( OutputImage );
writer->SetUseCompression(true);
writer->Update();
//saveFidsViaSuperVoxelDistanceMap(OutputImage, debug_prefix, radiusFactor); // using the subvoxel resolution not the original one
}
}
std::cout << "Computing .fids ...";
// Collect values in faceIndexMap
itk::Image<int, 3>::IndexType index = OutputImage->GetLargestPossibleRegion().GetIndex();
itk::Image<int, 3>::SizeType size = OutputImage->GetLargestPossibleRegion().GetSize();
itk::Image<int, 3>::PointType origin = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType spacing = OutputImage->GetSpacing();
// Store origin of image domain
this->imageOrigin[0] = origin[0];
this->imageOrigin[1] = origin[1];
this->imageOrigin[2] = origin[2];
// Store spacing of image domain
this->imageSpacing[0] = spacing[0] * number_of_subvoxels;
this->imageSpacing[1] = spacing[1] * number_of_subvoxels;
this->imageSpacing[2] = spacing[2] * number_of_subvoxels;
// Store size of image domain
this->imageSize[0] = size[0] / number_of_subvoxels;
this->imageSize[1] = size[1] / number_of_subvoxels;
this->imageSize[2] = size[2] / number_of_subvoxels;
this->imageIndex[0] = (int) (index[0] / number_of_subvoxels);
this->imageIndex[1] = (int) (index[1] / number_of_subvoxels);
this->imageIndex[2] = (int) (index[2] / number_of_subvoxels);
VoxelIndexType minIndex = this->imageIndex[0] + this->imageIndex[1] * this->imageSize[0] + this->imageIndex[2] * this->imageSize[0] * this->imageSize[1];
VoxelIndexType maxIndex = (this->imageSize[0]-1) + (this->imageSize[1]-1) * this->imageSize[0] + (this->imageSize[2]-1) * this->imageSize[0] * this->imageSize[1];
// collapse subvoxel face assignments to the original voxel grid; each hit is
// also smeared over a +/-2 voxel neighborhood so neighboring voxels share candidates
for(unsigned int i = index[0]; i < size[0]; i++) { // X
for(unsigned int j = index[1]; j < size[1]; j++) { // Y
for(unsigned int k = index[2]; k < size[2]; k++) { // Z
itk::Image<int, 3>::IndexType idx;
idx[0] = i; idx[1] = j; idx[2] = k;
if(OutputImage->GetPixel(idx) > -1) {
itk::Image<int, 3>::IndexType idx2;
idx2[0] = (int) (i / number_of_subvoxels);
idx2[1] = (int) (j / number_of_subvoxels);
idx2[2] = (int) (k / number_of_subvoxels);
for(int dx = -2; dx <= 2; dx++) {
for(int dy = -2; dy <= 2; dy++) {
for(int dz = -2; dz <= 2; dz++) {
itk::Image<int,3>::IndexType idx22;
idx22[0] = idx2[0] + dx;
idx22[1] = idx2[1] + dy;
idx22[2] = idx2[2] + dz;
if( !isInsideImageBuffer(idx22)) continue;
VoxelIndexType idx1 = idx22[0] + idx22[1] * this->imageSize[0] + idx22[2] * this->imageSize[0] * this->imageSize[1];
if(idx1 < 0) {
std::cout << "Index neg " << idx1 << std::endl;
continue;
}
// SHIREEN: when moving from set to vector for memory footprint, we need to make sure taht we are not pushing duplicate candidate faces
// otherwise the fids file will be huge (multiples of gigs)
int curf = OutputImage->GetPixel(idx);
if (std::find(this->faceIndexMap[idx1].begin(), this->faceIndexMap[idx1].end(), curf) == this->faceIndexMap[idx1].end()) // current candidate has not been pushed back before
this->faceIndexMap[idx1].push_back( curf );
// VoxelIndexType idx1 = idx2[0] + idx2[1] * this->imageSize[0] + idx2[2] * this->imageSize[0] * this->imageSize[1];
// this->faceIndexMap[idx1].insert( OutputImage->GetPixel(idx) );
}
}
}
}
}
}
}
std::cout << "Done";
std::cout << "\nLength of face Index Map " << this->faceIndexMap.size() << std::endl;
if(debug_prefix.compare("") > 0)
{
// std::cout << "Now saving distance map...";
// saveFidsDistanceMap(OutputImage, debug_prefix, radiusFactor);
std::cout << "Now saving signed distance map...";
saveFidsSignedDistanceMap(OutputImage, debug_prefix, debug_suffix, radiusFactor);
//saveFidsSignedDistanceMap(OutputImage, scaledDT, debug_prefix, debug_suffix, radiusFactor);
std::cout << "Done\n";
}
}
/* Prateep */
// Serial (single-threaded) variant of generateFaceIndexMapViaSuperVoxel:
// same super-voxel face bucketing, but the per-voxel closest-face assignment
// is an explicit iterator loop over the narrow band instead of the parallel
// bambam::map functor, and the super-voxel face list is dumped to a
// hard-coded debug file. Appears to be a debug/reference implementation.
// NOTE(review): unlike the parallel version, sigma is read into an int and is
// NOT rescaled by the DT spacing - confirm whether that difference is intended.
// NOTE(review): supVInd is filled directly from physical coordinates (the
// parallel version calls TransformPhysicalPointToIndex) - looks suspicious;
// verify against the parallel variant.
// Side effects: resets faceIndexMap; overwrites imageOrigin, imageSpacing,
// imageSize, number_of_subvoxels and number_of_voxels; writes a debug file to
// a user-specific absolute path.
void generateFaceIndexMapViaSuperVoxelSerial(itk::Image<PixelType, 3>::ConstPointer narrowBand, itk::Image<PixelType, 3>::Pointer scaledDT, float q, float ldelta,
int number_of_subvoxels = 1, int number_of_voxels = 1, float radiusFactor = 1.0, // search for all neighbors
std::string debug_prefix = "")
{
this->faceIndexMap.clear();
this->number_of_subvoxels = number_of_subvoxels;
this->number_of_voxels = number_of_voxels;
const double eps = 1e-6;
// inline DeepCopy
itk::Image<int, 3>::Pointer OutputImage = itk::Image<int, 3>::New();
OutputImage->SetRegions( narrowBand->GetLargestPossibleRegion() );
OutputImage->Allocate();
OutputImage->SetOrigin( narrowBand->GetOrigin() );
OutputImage->SetSpacing( narrowBand->GetSpacing() );
OutputImage->SetDirection( narrowBand->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > narrowBandIt(narrowBand, narrowBand->GetLargestPossibleRegion());
//itk::ImageRegionIteratorWithIndex< itk::Image<int,3> > narrowBandItShared(narrowBand, narrowBand->GetLargestPossibleRegion());
itk::ImageRegionIterator< itk::Image<int,3> > OutputImageIt(OutputImage, narrowBand->GetLargestPossibleRegion() );
narrowBandIt.GoToBegin();
OutputImageIt.GoToBegin();
// copy the narrow-band mask into the (int-typed) output image
while(!narrowBandIt.IsAtEnd() ) {
OutputImageIt.Set( (int) narrowBandIt.Get() );
++narrowBandIt;
++OutputImageIt;
}
narrowBandIt.GoToBegin();
//narrowBandItShared.GoToBegin();
OutputImageIt.GoToBegin();
#ifdef MP_USE_OPENMP
omp_set_num_threads(NUM_THREADS);
#endif
map<int, vector<int> > superVoxelFaceList;
int nf = this->faces.size();
itk::Image<int, 3>::PointType ori = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType sp = OutputImage->GetSpacing();
itk::Image<int, 3>::SizeType si = OutputImage->GetLargestPossibleRegion().GetSize();
// super-voxel grid geometry: same origin as the subvoxel image, spacing and
// size scaled by number_of_voxels * number_of_subvoxels
float supVoxelOrigin[3];
supVoxelOrigin[0] = (float) (ori[0]);
supVoxelOrigin[1] = (float) (ori[1]);
supVoxelOrigin[2] = (float) (ori[2]);
float supVoxelSpacing[3];
supVoxelSpacing[0] = (float) (sp[0] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[1] = (float) (sp[1] * number_of_voxels * number_of_subvoxels);
supVoxelSpacing[2] = (float) (sp[2] * number_of_voxels * number_of_subvoxels);
int supVoxelSize[3];
supVoxelSize[0] = (int) ((si[0] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[1] = (int) ((si[1] / (float)number_of_subvoxels) / (float)number_of_voxels);
supVoxelSize[2] = (int) ((si[2] / (float)number_of_subvoxels) / (float)number_of_voxels);
/***
* ver 1. Search each face and map it onto a supervoxell
for(int f = 0; f < nf; f++)
{
// points in physical coordinates
point v0 = this->vertices[ this->faces[f].v[0] ],
v1 = this->vertices[ this->faces[f].v[1] ],
v2 = this->vertices[ this->faces[f].v[2] ];
VoxelType ind0 = this->physicalPointToLinearIndex(v0, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
VoxelType ind1 = this->physicalPointToLinearIndex(v1, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
VoxelType ind2 = this->physicalPointToLinearIndex(v2, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
superVoxelFaceList[ind0].insert(f);
superVoxelFaceList[ind1].insert(f);
superVoxelFaceList[ind2].insert(f);
}
*/
/***
* ver 2.
*/
// 1. Compute \sigma --> maximum physical distance for each supVoxel from isosurface
//int sigmaSV = -LARGENUM;
// bucket every face into each super voxel whose search ball it intersects
int iter = 0;
for(int i = 0; i < supVoxelSize[0]; i++) {
for(int j = 0; j < supVoxelSize[1]; j++) {
for(int k = 0; k < supVoxelSize[2]; k++) {
VoxelIndexType p[3];
p[0] = i; p[1] = j; p[2] = k;
point supV = this->indexToPhysicalPoint(p, supVoxelOrigin, supVoxelSpacing);
point supVCent = supV;
itk::Image<int, 3>::IndexType supVInd;
for(int ii = 0; ii < 3; ii++) supVInd[ii] = supV[ii];
int sigma = scaledDT->GetPixel(supVInd);
// 2. Get ball radius
float ballRadiusSV = q + std::sqrt(ldelta*ldelta + sigma*sigma);
ballRadiusSV /= radiusFactor;
// Get center
for(int ii = 0; ii < 3; ii++) {
supVCent[ii] += (float) q/2;
}
// 3. Get supVoxelFaceList
for(int f = 0; f < nf; f++) {
//std::cout << "Face # : " << f << std::endl;
point pp;
double d = this->pointTriangleDistance(supVCent, this->faces[f], pp);
if(d < ballRadiusSV + eps)
{
VoxelIndexType ind = this->physicalPointToLinearIndex(supV, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
superVoxelFaceList[ind].push_back(f);
}
}
iter++;
std::cout << "iter : " << iter << "\n";
}
}
}
/* debug - prateep */
std::string outfilename = "/home/sci/prateepm/Public/ForShireen/sphere/superVoxelList.txt";
std::ofstream fout( outfilename.c_str(), std::ios::out);
for(map<int, vector<int> >::iterator it = superVoxelFaceList.begin(); it != superVoxelFaceList.end(); it++)
{
fout << (int) (*it).first << ": ";
for(vector<int>::iterator it1 = it->second.begin(); it1 != it->second.end(); it1++) {
fout << (*it1) << " ";
}
fout << (int) (*it).second.size();
fout << std::endl;
}
fout.close();
/* debug - prateep */
// serial sweep over the narrow band: for every in-band voxel pick the
// closest candidate face from its super voxel and write its id (or -1)
int stop = 1;
int fid = -1;
int tn;
//#pragma omp parallel private(tn,fid,narrowBandIt)
{
#ifdef MP_USE_OPENMP
tn = omp_get_thread_num();
#endif
std::cout << "\nExecuting thread : " << tn << std::endl;
while( stop != 0 ) {
//#pragma omp critical
{
// Voxel is in narrow band
if(narrowBandIt.Get() == 1)
{
point tmPoint;
itk::Image<int, 3>::PointType itkPoint;
narrowBand->TransformIndexToPhysicalPoint(narrowBandIt.GetIndex(), itkPoint);
for(int i = 0; i < 3; i++) { tmPoint[i] = itkPoint[i]; }
VoxelIndexType vox_ind = this->physicalPointToLinearIndex(tmPoint, this->imageOrigin, this->imageSpacing, this->imageSize);
// if (vox_ind == 25744820) // shireen
if(vox_ind == 2501770)
int tst = 0;
// Get neartest k vertices
vector<int> adjFaces; adjFaces.clear();
vector<int>::iterator adjFacesIt;
// find triangles enclosed inside each supervoxel
VoxelIndexType tmpInd = this->physicalPointToLinearIndex(tmPoint, supVoxelOrigin, supVoxelSpacing, supVoxelSize);
// std::cout << "Super Voxel List : " << superVoxelFaceList[tmpInd].size() << std::endl;
for(vector<int>::iterator it = superVoxelFaceList[tmpInd].begin(); it != superVoxelFaceList[tmpInd].end(); it++) {
adjFaces.push_back((*it));
}
std::cout << "Number of neighbors : " << adjFaces.size() << std::endl;
if(adjFaces.empty() ) //|| adjFaces.size() == nf)
{
// We can either abort here or ignore the voxel
OutputImageIt.Set(-1);
} else {
//std::cout << "Adjacent faces : " << this->adjacentfaces[imatch].size() << std::endl;
double minDist = LARGENUM;
for(adjFacesIt = adjFaces.begin(); adjFacesIt != adjFaces.end(); adjFacesIt++) {
point projPoint;
double dist = this->pointTriangleDistance(tmPoint, this->faces[*(adjFacesIt)], projPoint);
int region = this->pointTriangleRegion(tmPoint, this->faces[*(adjFacesIt)]);
// if (vox_ind == 2501770)
// std::cout << "Adjacent faceId : " << *(adjFacesIt) << ", Dist : " << dist << ", Region : " << region << std::endl;
if(dist + eps <= minDist) {
minDist = dist;
fid = *(adjFacesIt);
}
}
OutputImageIt.Set(fid);
adjFaces.clear();
}
} else {
OutputImageIt.Set(-1);
}
++narrowBandIt;
++OutputImageIt;
}
if(narrowBandIt.IsAtEnd() || OutputImageIt.IsAtEnd())
{
stop = 0;
//#pragma omp flush(stop)
}
}
}
if(debug_prefix.compare ("") != 0)
{
itk::ImageFileWriter< itk::Image<int,3> >::Pointer writer = itk::ImageFileWriter< itk::Image<int, 3> >::New();
std::stringstream ss; ss << radiusFactor;
std::stringstream sp; sp << this->imageSpacing[0];
std::string f = debug_prefix + ".faceInd_r" + ss.str() + "_sp" + sp.str() + ".nrrd";
writer->SetFileName( f.c_str() );
writer->SetInput( OutputImage );
writer->SetUseCompression(true);
writer->Update();
}
// Collect values in faceIndexMap
itk::Image<int, 3>::IndexType index = OutputImage->GetLargestPossibleRegion().GetIndex();
itk::Image<int, 3>::SizeType size = OutputImage->GetLargestPossibleRegion().GetSize();
itk::Image<int, 3>::PointType origin = OutputImage->GetOrigin();
itk::Image<int, 3>::SpacingType spacing = OutputImage->GetSpacing();
// Store origin of image domain
this->imageOrigin[0] = origin[0];
this->imageOrigin[1] = origin[1];
this->imageOrigin[2] = origin[2];
// Store spacing of image domain
this->imageSpacing[0] = spacing[0] * number_of_subvoxels;
this->imageSpacing[1] = spacing[1] * number_of_subvoxels;
this->imageSpacing[2] = spacing[2] * number_of_subvoxels;
// Store size of image domain
this->imageSize[0] = size[0] / number_of_subvoxels;
this->imageSize[1] = size[1] / number_of_subvoxels;
this->imageSize[2] = size[2] / number_of_subvoxels;
// NOTE(review): imageIndex is read here but, unlike the parallel variant, is
// never set in this function - it relies on a previous call having set it.
VoxelIndexType minIndex = this->imageIndex[0] + this->imageIndex[1] * this->imageSize[0] + this->imageIndex[2] * this->imageSize[0] * this->imageSize[1];
VoxelIndexType maxIndex = (this->imageSize[0]-1) + (this->imageSize[1]-1) * this->imageSize[0] + (this->imageSize[2]-1) * this->imageSize[0] * this->imageSize[1];
// collapse subvoxel face assignments to the original voxel grid; each hit is
// also pushed into the two linearly adjacent voxels (idx1 +/- 1)
for(unsigned int i = index[0]; i < size[0]; i++) { // X
for(unsigned int j = index[1]; j < size[1]; j++) { // Y
for(unsigned int k = index[2]; k < size[2]; k++) { // Z
itk::Image<int, 3>::IndexType idx;
idx[0] = i; idx[1] = j; idx[2] = k;
if(OutputImage->GetPixel(idx) > -1) {
itk::Image<int, 3>::IndexType idx2;
idx2[0] = (int) (i / number_of_subvoxels);
idx2[1] = (int) (j / number_of_subvoxels);
idx2[2] = (int) (k / number_of_subvoxels);
VoxelIndexType idx1 = idx2[0] + idx2[1] * this->imageSize[0] + idx2[2] * this->imageSize[0] * this->imageSize[1];
this->faceIndexMap[idx1].push_back( OutputImage->GetPixel(idx) );
if(idx1+1 <= maxIndex)
this->faceIndexMap[idx1+1].push_back( OutputImage->GetPixel(idx) );
if(idx1-1 >= minIndex)
this->faceIndexMap[idx1-1].push_back( OutputImage->GetPixel(idx) );
}
}
}
}
std::cout << "\nLength of face Index Map " << this->faceIndexMap.size() << std::endl;
}
// Locate the mesh face closest to physical point x and return its id.
// Outputs:
//   triangleX             - the winning face
//   alphaX, betaX, gammaX - barycentric coordinates of x (of its projection
//                           onto the face when the face index map path is taken)
// Strategy: prefer the precomputed face index map (fids); otherwise fall back
// to a nearest-vertex (kd-tree) scan over the vertex's adjacent faces.
// Returns -1 only if no candidate face exists at all.
int GetTriangleInfoForPoint(point x, Face& triangleX, float& alphaX, float& betaX, float& gammaX)
{
    int faceID = -1; // fix: was uninitialized and could be returned as garbage
    if(this->faceIndexMap.size() > 0) // there is a generated face index map so use it
    {
        // Physical point to image index
        VoxelIndexType linearIndX = this->physicalPointToLinearIndex(x);
        // collect face indices for this voxel
        std::map<VoxelIndexType, vector<int> >::iterator it = this->faceIndexMap.find(linearIndX);
        if(it != this->faceIndexMap.end() && !it->second.empty()) // guard: empty list would leave the winner undefined
        {
            const vector<int>& faceList = it->second; // fix: reference instead of copying the candidate list
            double minDist = LARGENUM;
            int winnerIndex = faceList.front(); // fix: was uninitialized (UB if no candidate improved minDist)
            for(vector<int>::const_iterator fit = faceList.begin(); fit != faceList.end(); ++fit)
            {
                // project the point onto the plane of the current triangle
                point projPoint;
                double dist = this->pointTriangleDistance(x, this->faces[*fit], projPoint);
                if (dist < minDist)
                {
                    minDist = dist;
                    winnerIndex = (*fit);
                }
            }
            triangleX = this->faces[winnerIndex];
            faceID = winnerIndex;
            // barycentric coordinates are taken at the projection of x onto the winner
            point projPoint;
            this->pointTriangleDistance(x, triangleX, projPoint);
            vec barycentric = this->ComputeBarycentricCoordinates(projPoint, triangleX);
            alphaX = barycentric[0];
            betaX = barycentric[1];
            gammaX = barycentric[2];
        }
        else //kdtree based
        {
#if SHOW_WARNING
            std::cout << "warning: using kdtree for triangle info because voxel index " << linearIndX <<": "<< x <<" is not found in the face index map !!! ...\n" ;
#endif
            // get vertex closest to first point - x
            int vertX = this->FindNearestVertex(x);
            // scan all adjacent faces to see which face (f) includes point x
            triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
            faceID = this->adjacentfaces[vertX][0]; // fix: was left uninitialized before the loop
            for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
            {
                // check if face contains x and store barycentric coordinates for x in face f
                triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
                faceID = this->adjacentfaces[vertX][fNumber];
                vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
                alphaX = barycentric[0];
                betaX = barycentric[1];
                gammaX = barycentric[2];
                if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
                     ( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
                     ( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
                {
                    break; // found the containing face
                }
            }
        }
    }
    else
    {
#if SHOW_WARNING
        std::cout << "warning: using kdtree for triangle info because there is no face index map !!! ...\n" ;
#endif
        // get vertex closest to first point - x
        int vertX = this->FindNearestVertex(x);
        // scan all adjacent faces to see which face (f) includes point x
        triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
        faceID = this->adjacentfaces[vertX][0];
        for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
        {
            // check if face contains x and store barycentric coordinates for x in face f
            triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
            faceID = this->adjacentfaces[vertX][fNumber];
            vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
            alphaX = barycentric[0];
            betaX = barycentric[1];
            gammaX = barycentric[2];
            if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
                 ( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
                 ( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
            {
                break; // found the containing face
            }
        }
    }
    return faceID;
}
// Find the mesh vertex closest to the surface projection of physical point x.
// Prefers the precomputed face index map (project x onto the closest candidate
// face, then take the vertex nearest that projection); otherwise falls back to
// a plain nearest-vertex (kd-tree) query. Returns -1 only when no vertex can
// be determined at all.
int GetVertexInfoForPoint(point x)
{
    int vertX = -1; // fix: explicit sentinel instead of an uninitialized local
    Face triangleX;
    float alphaX, betaX, gammaX; // barycentric coords (computed for parity with GetTriangleInfoForPoint; not returned)
    if(this->faceIndexMap.size() > 0) // there is a generated face index map so use it
    {
        // Physical point to image index
        VoxelIndexType linearIndX = this->physicalPointToLinearIndex(x);
        // collect face indices for this voxel
        std::map<VoxelIndexType, vector<int> >::iterator it = this->faceIndexMap.find(linearIndX);
        if(it != this->faceIndexMap.end() && !it->second.empty()) // guard: empty list would leave the winner undefined
        {
            const vector<int>& faceList = it->second; // fix: reference instead of copying the candidate list
            double minDist = LARGENUM;
            int winnerIndex = faceList.front(); // fix: was uninitialized (UB if no candidate improved minDist)
            for(vector<int>::const_iterator fit = faceList.begin(); fit != faceList.end(); ++fit)
            {
                // project the point onto the plane of the current triangle
                point projPoint;
                double dist = this->pointTriangleDistance(x, this->faces[*fit], projPoint);
                if (dist < minDist)
                {
                    minDist = dist;
                    winnerIndex = (*fit);
                }
            }
            triangleX = this->faces[winnerIndex];
            // project x onto the winning face; the nearest vertex is taken w.r.t. that projection
            point projPoint;
            this->pointTriangleDistance(x, triangleX, projPoint);
            vec barycentric = this->ComputeBarycentricCoordinates(projPoint, triangleX);
            alphaX = barycentric[0];
            betaX = barycentric[1];
            gammaX = barycentric[2];
            // get vertex closest to first point - x
            vertX = this->FindNearestVertex(projPoint);
        }
        else //kdtree based
        {
#if SHOW_WARNING
            std::cout << "warning: using kdtree for triangle info because voxel index " << linearIndX << " is not found in the face index map !!! ...\n" ;
#endif
            // get vertex closest to first point - x
            vertX = this->FindNearestVertex(x);
            // scan all adjacent faces to see which face (f) includes point x
            triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
            for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
            {
                // check if face contains x and store barycentric coordinates for x in face f
                triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
                vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
                alphaX = barycentric[0];
                betaX = barycentric[1];
                gammaX = barycentric[2];
                if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
                     ( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
                     ( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
                {
                    break; // found the containing face
                }
            }
        }
    }
    else
    {
#if SHOW_WARNING
        std::cout << "warning: using kdtree for triangle info because there is no face index map !!! ...\n" ;
#endif
        // get vertex closest to first point - x
        vertX = this->FindNearestVertex(x);
        // scan all adjacent faces to see which face (f) includes point x
        triangleX = this->faces[ this->adjacentfaces[vertX][0] ];
        for (unsigned int fNumber = 0; fNumber < this->adjacentfaces[vertX].size(); fNumber++)
        {
            // check if face contains x and store barycentric coordinates for x in face f
            triangleX = this->faces[ this->adjacentfaces[vertX][fNumber] ];
            vec barycentric = this->ComputeBarycentricCoordinates(x,triangleX);
            alphaX = barycentric[0];
            betaX = barycentric[1];
            gammaX = barycentric[2];
            if ( ( ( barycentric[0] >= 0 ) && ( barycentric[0] <= 1 ) ) &&
                 ( ( barycentric[1] >= 0 ) && ( barycentric[1] <= 1 ) ) &&
                 ( ( barycentric[2] >= 0 ) && ( barycentric[2] <= 1 ) ) )
            {
                break; // found the containing face
            }
        }
    }
    return vertX;
}
// end SHIREEN
float GetEuclideanDistance(int v1,int v2)
{
float d = 0.000001f;
point p1, p2;
p1 = this->vertices[v1];
p2 = this->vertices[v2];
d = dist(p1,p2);
return d;
}
// SHIREEN
// Euclidean distance between two arbitrary physical points.
float GetEuclideanDistance(point p1, point p2)
{
    return dist(p1, p2);
}
// end SHIREEN
// Precomputed geodesic distance between vertices v1 and v2.
// The table is stored only for key pairs (larger id -> smaller id); a missing
// entry yields LARGENUM. Identical vertices return a tiny positive epsilon.
float GetGeodesicDistance(int v1,int v2)
{
    if (v1 == v2)
        return 0.000001f;
    // geodesicMap is keyed by the larger vertex id; the smaller id indexes into it
    const int row = (v2 > v1) ? v2 : v1;
    const int col = (v2 > v1) ? v1 : v2;
    std::map<unsigned int,float>::iterator entry = this->geodesicMap[row].find(col);
    if (entry == this->geodesicMap[row].end())
        return LARGENUM;
    return entry->second;
}
/* Prateep */
// Geodesic distance between two arbitrary surface points x and y, obtained by
// barycentric interpolation of the precomputed vertex-to-vertex geodesics.
double GetGeodesicDistance(point x, point y)
{
    // Locate the faces containing x and y together with barycentric weights.
    Face triangleX, triangleY;
    float bx[3], by[3];
    GetTriangleInfoForPoint(x, triangleX, bx[0], bx[1], bx[2]);
    GetTriangleInfoForPoint(y, triangleY, by[0], by[1], by[2]);
    // Level 1: interpolate the distance from each vertex of triangleX to y.
    float dvy[3];
    for (int i = 0; i < 3; i++) {
        dvy[i] = ( by[0] * this->GetGeodesicDistance( triangleX.v[i], triangleY.v[0] ) ) +
                 ( by[1] * this->GetGeodesicDistance( triangleX.v[i], triangleY.v[1] ) ) +
                 ( by[2] * this->GetGeodesicDistance( triangleX.v[i], triangleY.v[2] ) );
    }
    // Level 2: interpolate across triangleX to reach x itself.
    float dxy = (bx[0] * dvy[0]) + (bx[1] * dvy[1]) + (bx[2] * dvy[2]);
    return dxy;
}
/* Praful */
// Randomized regression harness for the geodesic-distance estimators.
// Samples numTri random face pairs and numPts random barycentric points per
// pair, then writes to report file `str` (tab separated):
//   f1, f2, alpha, beta, gamma, gth, valApprox ("LM"), valBary ("Bary")
// NOTE(review): the "ground truth" gth is the great-circle arc length on the
// bounding sphere (acos of the normalized dot product times bsphere.r) - this
// is only meaningful for (near-)spherical test meshes centered at the origin.
// Uses rand() without seeding, so results depend on caller-side srand state.
// Returns 1.0f unconditionally.
float TestReport(char* str, int numTri, int numPts)
{
// std::cout<<"Generating Test Report..."<<std::endl;
std::ofstream myfile;
myfile.open(str);
int numFaces = this->faces.size();
// mode selectors passed to GetBronsteinGeodesicDistance
char str1[] = "Newton";
char str2[] = "Bary";
char str3[] = "LM";
int counter = 0;
for(int i=0; i<numTri; i++)
{
// std::cout<<"Counter: "<<++counter<<std::endl;
// pick a random pair of faces for this trial
int f1 = rand() % numFaces;
int f2 = rand() % numFaces;
// int tmpf = f1 + 1 + (rand() % 5);
// std::cout<<"tmpf = "<<tmpf<<std::endl;
// double param = (double) (tmpf)/ (double) (numFaces);
// double intpart, fractpart;
// fractpart = std::modf(param, &intpart);
// int f2 = (int) std::floor(fractpart*(double)numFaces);
Face Sa = this->faces[f1];
Face Sb = this->faces[f2];
point pta, ptb;
// std::cout<<"Original: "<<std::endl;
// std::cout<<"Face a: "<<Sa.v[0]<<" "<<Sa.v[1]<<" "<<Sa.v[2]<<std::endl;
// std::cout<<"Face b: "<<Sb.v[0]<<" "<<Sb.v[1]<<" "<<Sb.v[2]<<std::endl;
for(int j=0; j<numPts; j++)
{
// random barycentric coordinates in [0,0.5) x [0,0.5); gamma takes the rest
float alp = (float)(rand() % 50) / 100.0f;
float bet = (float)(rand() % 50) / 100.0f;
float gam = 1.0f - alp - bet;
// std::cout<<"numFaces = "<<numFaces<<std::endl;
std::cout<<"f1 = "<<f1<<" f2 = "<<f2<<std::endl;
std::cout<<"Original Barycoordinates: "<<std::endl;
std::cout<<alp<<" "<<bet<<" "<<gam<<std::endl;
vnl_vector <float> baryCoord(3);
baryCoord[0] = alp;
baryCoord[1] = bet;
baryCoord[2] = gam;
// realize the barycentric samples as physical points on the two faces
for(int ii=0; ii<3; ii++)
{
pta[ii] = alp * (float)(this->vertices[Sa.v[0]][ii]) + bet * (float)(this->vertices[Sa.v[1]][ii]) + gam*(float)(this->vertices[Sa.v[2]][ii]);
ptb[ii] = alp * (float)(this->vertices[Sb.v[0]][ii]) + bet * (float)(this->vertices[Sb.v[1]][ii]) + gam*(float)(this->vertices[Sb.v[2]][ii]);
}
// spherical ground truth: angle between position vectors times sphere radius
float ctheta = (pta DOT ptb) / (len(pta) * len(ptb));
if(ctheta > 1.0f) ctheta = 1.0f;
else if(ctheta < -1.0f) ctheta = -1.0f;
float gth = std::acos(ctheta) * this->bsphere.r;
// std::cout<<"check1"<<std::endl;
float valApprox = this->GetBronsteinGeodesicDistance(pta, ptb, str3);
// float valApprox = this->GetBronsteinGeodesicDistance(pta, ptb, str3, Sa, Sb, baryCoord, baryCoord); %debugging
// std::cout<<"check2"<<std::endl;
float valBary = this->GetBronsteinGeodesicDistance(pta, ptb, str2);//, Sa, Sb, baryCoord, baryCoord);
// float valNewton = this->GetBronsteinGeodesicDistance(pta, ptb, str1, Sa, Sb, baryCoord, baryCoord);
myfile<<f1<<"\t"<<f2<<"\t"<<alp<<"\t"<<bet<<"\t"<<gam<<"\t"<<gth<<"\t"<<valApprox<<"\t"<<valBary<<"\n";
std::cout<<"gth: "<<gth<<"\t"<<"3PtApprox: "<<valApprox<<"\tBary: "<<valBary<<"\n";
}
}
myfile.close();
return 1.0f;
}
/* Praful */
float TestApproxGeodesic(int f1, int f2)
{
Face Sa = this->faces[f1];
Face Sb = this->faces[f2];
point pta, ptb;
for(int i=0; i<3; i++)
{
pta[i] = 0.33f*this->vertices[Sa.v[0]][i] + 0.33f*this->vertices[Sa.v[1]][i] + 0.34f*this->vertices[Sa.v[2]][i];
ptb[i] = 0.33f*this->vertices[Sb.v[0]][i] + 0.33f*this->vertices[Sb.v[1]][i] + 0.34f*this->vertices[Sb.v[2]][i];
}
vnl_vector <float> baryCoord(3,0.33);
float ctheta = (pta DOT ptb) / (len(pta) * len(ptb));
if(ctheta > 1.0f) ctheta = 1.0f;
else if(ctheta < -1.0f) ctheta = -1.0f;
char method[] = "Newton";
float gth = std::acos(ctheta) * this->bsphere.r;
float val = this->GetBronsteinGeodesicDistance(pta, ptb, method);//, Sa, Sb, baryCoord, baryCoord);
std::cout<<"**************"<<std::endl;
std::cout<<"Approx value = "<<val<<std::endl;
std::cout<<"Gth value = "<<gth<<std::endl;
std::cout<<"**************"<<std::endl;
return val;
}
/* Praful */
// Approximate geodesic distance between two arbitrary surface points.
// Locates the triangle containing each point along with its barycentric
// coordinates, then delegates to the (Face, baryCoord) overload.
float GetBronsteinGeodesicDistance(point a, point b, char* method)//, Face Sa, Face Sb, vnl_vector <float> baryCoord_a, vnl_vector <float> baryCoord_b)
{
    Face Sa, Sb;
    vnl_vector <float> baryCoord_a(3), baryCoord_b(3);
    float alp_a, alp_b, bet_a, bet_b, gam_a, gam_b;
    GetTriangleInfoForPoint(a, Sa, alp_a, bet_a, gam_a);
    GetTriangleInfoForPoint(b, Sb, alp_b, bet_b, gam_b);
    // BUGFIX: baryCoord_a/baryCoord_b were previously passed on while still
    // uninitialized -- the coordinates computed above were never stored.
    baryCoord_a[0] = alp_a; baryCoord_a[1] = bet_a; baryCoord_a[2] = gam_a;
    baryCoord_b[0] = alp_b; baryCoord_b[1] = bet_b; baryCoord_b[2] = gam_b;
    float dGeo_a_2_b = GetBronsteinGeodesicDistance(Sa, Sb, baryCoord_a, baryCoord_b, method);
    return dGeo_a_2_b;
}
/* Praful */
// Approximate geodesic distance between two surface points given as
// (triangle, barycentric coordinates) pairs, via Bronstein's three-point
// approximation evaluated in each triangle's 2-D canonical frame.
// Returns LARGENUM when the triangles are too far apart, and EXIT_FAILURE
// (as a float) when the barycentric input is invalid.
float GetBronsteinGeodesicDistance( Face Sa, Face Sb, vnl_vector <float> baryCoord_a, vnl_vector <float> baryCoord_b, char* method)
{
    // Reconstruct the Cartesian coordinates of both points.
    point a; a.clear();
    point b; b.clear();
    for (int d1 = 0; d1 < 3; d1++)
    {
        a[d1] = 0.0;
        b[d1] = 0.0;
        for (int d2 = 0; d2 < 3; d2++)
        {
            point vt = vertices[Sa.v[d2]];
            a[d1] += baryCoord_a[d2]*vt[d1];
            point vt2 = vertices[Sb.v[d2]];
            b[d1] += baryCoord_b[d2]*vt2[d1];
        }
    }
    // Clamp each coordinate away from zero to avoid degenerate weights.
    float alp_a = baryCoord_a[0], bet_a = baryCoord_a[1], gam_a = baryCoord_a[2];
    float alp_b = baryCoord_b[0], bet_b = baryCoord_b[1], gam_b = baryCoord_b[2];
    if (alp_a < 0.000001f) alp_a = 0.000001f;
    if (bet_a < 0.000001f) bet_a = 0.000001f;
    if (gam_a < 0.000001f) gam_a = 0.000001f;
    if (alp_b < 0.000001f) alp_b = 0.000001f;
    if (bet_b < 0.000001f) bet_b = 0.000001f;
    if (gam_b < 0.000001f) gam_b = 0.000001f;
    // BUGFIX: compute each normalization sum once before dividing. The
    // previous code divided sequentially (alp /= sum; bet /= (newAlp + ...)),
    // reusing already-normalized components, so the result did not sum to 1.
    float sum_a = alp_a + bet_a + gam_a;
    alp_a /= sum_a;
    bet_a /= sum_a;
    gam_a /= sum_a;
    float sum_b = alp_b + bet_b + gam_b;
    alp_b /= sum_b;
    bet_b /= sum_b;
    gam_b /= sum_b;
    baryCoord_a[0] = alp_a;
    baryCoord_a[1] = bet_a;
    baryCoord_a[2] = gam_a;
    baryCoord_b[0] = alp_b;
    baryCoord_b[1] = bet_b;
    baryCoord_b[2] = gam_b;
    vnl_vector<float> xA(2);
    vnl_vector<float> xB(2);
    vnl_matrix<float> Xa(2,3);
    vnl_matrix<float> Xb(2,3);
    // Sanity check: after normalization all coordinates must lie in [0, 1].
    if(baryCoord_a.max_value() >1.0f || baryCoord_a.min_value()<0.0f || baryCoord_b.max_value() >1.0f || baryCoord_b.min_value()<0.0f)
    {
        std::cerr<<"incorrect barycentric coordinates...!!"<<std::endl;
        vcl_cerr<<"baryCoord_a: "<<baryCoord_a<<std::endl;
        vcl_cerr<<"baryCoord_b: "<<baryCoord_b<<std::endl;
        return EXIT_FAILURE;
    }
    // 2-D canonical (PCA) frames for the triangles containing a and b.
    ComputeCanonicalForm(a, xA, Xa);
    ComputeCanonicalForm(b, xB, Xb);
    // Pairwise vertex-to-vertex geodesic distances between the two triangles.
    vnl_matrix<float> dA_2_B(3,3);
    bool tooFar = false;
    for(int i=0; i<3; i++)
    {
        for(int j=0; j<3; j++)
        {
            dA_2_B(i,j) = this->GetGeodesicDistance(Sa.v[i], Sb.v[j]);
            // SHIREEN: if triangles are too far, don't bother to complete
            if ( dA_2_B(i,j) == LARGENUM)
            {
                tooFar = true;
                break;
            }
        }
        if (tooFar)
            break;
    }
    if (tooFar)
        return LARGENUM;
    // Level 1: approximate distance from a to each vertex of Sb.
    vnl_vector<float> geo_approx_2_B(3);
    for(int vertB_id=0; vertB_id<3; vertB_id++)
        geo_approx_2_B[vertB_id] = ComputeThreePointApproximatedGeodesic(xA, baryCoord_a, Xa, dA_2_B.get_column(vertB_id), method);
    // Level 2: interpolate those three distances at b.
    float dGeo_a_2_b = ComputeThreePointApproximatedGeodesic(xB, baryCoord_b, Xb, geo_approx_2_B, method);
    return dGeo_a_2_b;
}
/* Praful */
// Gradient (written into G) of the weighted residual energy
//   E(x0) = sum_i baryCoord[i] * ( ||x0 - x_i||^2 - ds[i]^2 )^2
// with respect to the 2-D point x0, where x_i are the columns of X.
// Always returns 1.0f.
float ComputeGradient(vnl_vector<float> x0, vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, vnl_vector<float> & G)
{
    G = vnl_vector<float>(2, 0.0f);
    for (int i = 0; i < 3; i++)
    {
        vnl_vector<float> diff = x0 - X.get_column(i);
        float residual = dot_product(diff, diff) - ds[i]*ds[i];
        // d/dx0[k] of baryCoord[i]*residual^2 is 4*b_i*residual*diff[k].
        for (int k = 0; k < 2; k++)
            G[k] += 4*baryCoord[i]*residual*diff[k];
    }
    return 1.0f;
}
/* Praful */
// 2x2 Hessian (written into H) of the weighted residual energy
//   E(x0) = sum_i baryCoord[i] * ( ||x0 - x_i||^2 - ds[i]^2 )^2
// at the 2-D point x0, where x_i are the columns of X. Always returns 1.0f.
float ComputeHessian(vnl_vector<float> x0, vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, vnl_matrix<float> & H )
{
    H = vnl_matrix<float>(2, 2, 0.0f);
    for (int i = 0; i < 3; i++)
    {
        vnl_vector<float> diff = x0 - X.get_column(i);
        float residual = dot_product(diff, diff) - ds[i]*ds[i];
        for (int k = 0; k < 2; k++)
        {
            for (int kp = 0; kp < 2; kp++)
            {
                if (k == kp)
                {
                    // Diagonal: 4*b_i*(residual + 2*diff[k]^2).
                    H(k, k) += 4*baryCoord[i]*(residual + 2*diff[k]*diff[k]);
                }
                else
                {
                    // Off-diagonal: 8*b_i*diff[k]*diff[kp].
                    H(k, kp) += 8*baryCoord[i]*diff[k]*diff[kp];
                }
            }
        }
    }
    return 1.0f;
}
/* Praful */
// Approximate the geodesic distance from the 2-D canonical point x to a
// "virtual source" whose distances to the triangle's three vertices
// (columns of X) are given in ds, weighted by the barycentric coordinates.
// `method` selects the solver: "Bary" (or a failed virtual-source recovery)
// falls back to barycentric interpolation of ds; "Newton" runs 10 undamped
// Newton steps; any other value runs a Levenberg-Marquardt loop.
// Returns the approximated distance ||x0 - x||.
float ComputeThreePointApproximatedGeodesic(vnl_vector<float> x, vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, char* method)
{
float geo_approx = -1.0f;
// Initial virtual-source guess from pairwise circle intersections.
vnl_vector<float> x0;
// std::cout<<"check4"<<std::endl;
float n = GetVirtualSource(baryCoord, X, ds, x0);
// std::cout<<"check5"<<std::endl;
char check2[] = "Bary";
// No usable intersection (n == -1) or caller asked for "Bary": plain
// barycentric interpolation of the three vertex distances.
if(n==-1.0f || strcmp(method, check2)==0)
{
// std::cout<<"Using Bary..."<<std::endl;
geo_approx = dot_product(baryCoord, ds);
}
else
{
char check1[] = "Newton";
if(strcmp(method, check1)==0) //Newton method
{
// Fixed 10 full Newton steps (eta = 1) on the weighted residual energy;
// no convergence test or step-size control is performed.
// std::cout<<"Using Newton iterations..."<<std::endl;
// vcl_cout<<"Initial x0= "<<x0<<std::endl;
float eta = 1.0f;
for(int iter=0; iter<10; iter++)
{
vnl_matrix<float> H;
vnl_vector<float> G;
ComputeGradient(x0, baryCoord, X, ds, G);
ComputeHessian(x0, baryCoord, X, ds, H);
// NOTE(review): H is not guaranteed invertible here; confirm that
// vnl_matrix_inverse behaves acceptably for near-singular H.
vnl_matrix<float> Hinv = vnl_matrix_inverse<float>(H);
x0 -= eta*Hinv*G;
}
// vcl_cout<<"Final x0= "<<x0<<std::endl;
}
else //LM method
{
// Levenberg-Marquardt on residuals f_i = sqrt(b_i)*(||x0-x_i||^2 - d_i^2)
// with the standard damping / gain-ratio update (Madsen et al. style).
// std::cout<<"LM..coming soon.."<<std::endl;
// std::cout<<"Using LM..."<<std::endl;
float v = 2.0f;
float eps1 = 0.000001f;
float eps2 = 0.000001f;
float tau = 0.001f;
int m = 3;
int n = 2;
float k = 0.0f;
float kmax = 10.0f;
// computing Jacobian
// vcl_cout<<"x0: "<<std::endl<<x0<<std::endl;
// vcl_cout<<"baryCoord: "<<std::endl<<baryCoord<<std::endl;
vnl_matrix<float> J(m, n, 0.0f);
for(int i = 0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
// vcl_cout<<"xi: "<<std::endl<<xi<<std::endl;
for(int j = 0; j<n; j++)
{
J(i,j)=2.0f * (float) (std::sqrt(baryCoord[i])) * (x0[j]-xi[j]);
}
}
// vcl_cout<<"J: "<<std::endl<<J.extract(m,n,0,0)<<std::endl;
// computing function values given the current guess
vnl_vector<float> f(m, 0.0f);
for(int i=0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
float di = ds[i];
vnl_vector<float> x0_m_xi;
x0_m_xi = x0 - xi;
float r_i = dot_product(x0_m_xi, x0_m_xi) - di*di;
f[i] = (float) (std::sqrt(baryCoord[i])) * r_i;
}
// F = 0.5 * ||f||^2 is the current objective value.
float F;
F = dot_product(f,f);
F = 0.5f*F;
// Gauss-Newton normal-equation pieces: A = J^T J, g = J^T f.
vnl_matrix<float> A(n,n,0.0f);
A = J.transpose()*J;
vnl_vector<float> g(n,0.0f);
g = J.transpose()*f;
// Initial damping: tau times the largest diagonal entry of A.
vnl_vector<float> diagA = A.get_diagonal();
float max_diagA = diagA.max_value();
float mu = tau * max_diagA;
float norm_g = g.two_norm();
vnl_matrix<float> muId(n,n,0.0f);
vnl_matrix<float> A_mu(n,n,0.0f);
vnl_matrix<float> A_mu_inv;
vnl_vector<float> hlm(n,0.0f);
vnl_vector<float> xnew(n,0.0f);
vnl_vector<float> fnew(m, 0.0f);
float Fnew=0.0f, delta_L=0.0f, rho=0.0f;
// std::cout<<"****************"<<std::endl;
bool found = norm_g <= eps1;
// Main LM loop: at most kmax iterations, stops early when the gradient or
// the step becomes small.
while(!found && k<kmax)
{
k = k + 1.0f;
// Solve the damped system: hlm = -(A + mu*I)^{-1} g.
muId.set_identity();
muId = mu*muId;
A_mu = A + muId;
// std::cout<<"check4"<<std::endl;
// vcl_cout<<"A: "<<std::endl<<A.extract(n,n,0,0)<<std::endl;
// std::cout<<"mu: "<<mu<<std::endl;
// vcl_cout<<"A_mu: "<<std::endl<<A_mu.extract(n,n,0,0)<<std::endl;
A_mu_inv = vnl_matrix_inverse<float>(A_mu);
// std::cout<<"check51"<<std::endl;
// vcl_cout<<"A_mu_inv: "<<std::endl<<A_mu_inv.extract(n,n,0,0)<<std::endl;
A_mu_inv = -1.0f*A_mu_inv;
// vcl_cout<<"A_mu_inv: "<<std::endl<<A_mu_inv.extract(n,n,0,0)<<std::endl;
hlm = A_mu_inv*g;
float norm_hlm = hlm.two_norm();
float norm_x0 = x0.two_norm();
// Stop when the proposed step is negligible relative to |x0|.
if(norm_hlm <= (eps1 * (norm_x0 + eps2)))
{
found = true;
}
else
{
// Trial point and its objective value.
xnew = x0 + hlm;
for(int i = 0; i<m ; i++)
{
vnl_vector<float> xi = X.get_column(i);
float di = ds[i];
vnl_vector<float> x_m_xi;
x_m_xi = xnew - xi;
float r_i = dot_product(x_m_xi, x_m_xi) - di*di;
fnew[i] = (float) (std::sqrt(baryCoord[i])) * r_i;
}
Fnew = dot_product(fnew,fnew);
Fnew = 0.5f*Fnew;
// Gain ratio: actual decrease over the decrease predicted by the
// linear model. NOTE(review): delta_L could be ~0 here; confirm
// division is safe for this data.
delta_L = 0.5f*dot_product(hlm, (mu*hlm-g));
rho = (F-Fnew)/delta_L;
if(rho>0.0f)
{
// Step accepted: move, recompute Jacobian/residuals/normal equations.
x0 = xnew;
// computing Jacobian
for(int i = 0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
for(int j = 0; j<n; j++)
{
J(i,j)=2.0f * (float) (std::sqrt(baryCoord[i])) * (x0[j]-xi[j]);
}
}
// computing function values given the current guess
for(int i=0; i<m; i++)
{
vnl_vector<float> xi = X.get_column(i);
float di = ds[i];
vnl_vector<float> x0_m_xi;
x0_m_xi = x0 - xi;
float r_i = dot_product(x0_m_xi, x0_m_xi) - di*di;
f[i] = (float) (std::sqrt(baryCoord[i])) * r_i;
}
F = dot_product(f,f);
F = 0.5f*F;
A = J.transpose()*J;
g = J.transpose()*f;
norm_g = g.two_norm();
found = norm_g <= eps1;
// std::cout<<"=================="<<std::endl;
// std::cout<<"mu= "<<mu<<std::endl;
// std::cout<<"=================="<<std::endl;
// Shrink damping by max(1/3, 1-(2*rho-1)^3) -- the classic LM update.
float cmp1 = 1.0f - (2.0f*rho - 1.0f)*(2.0f*rho - 1.0f)*(2.0f*rho - 1.0f);
if(0.3f > cmp1)
{
mu = mu*0.3f;
}
else
{
mu = mu*cmp1;
}
// std::cout<<"=================="<<std::endl;
// std::cout<<"cmp1= "<<cmp1<<" mu= "<<mu<<std::endl;
// std::cout<<"=================="<<std::endl;
v = 2.0f;
}
else
{
// Step rejected: increase damping and grow the growth factor.
mu = mu*v;
v = 2.0f*v;
}
}
}
// vcl_cout<<x0<<std::endl;
}
// Distance from the refined virtual source to the query point.
geo_approx = (x0-x).two_norm();
} //end else xinit not empty
// std::cout<<"Returning geo_approx..."<<geo_approx<<std::endl;
return geo_approx;
}
/* Praful */
// Recover an initial guess for the "virtual source" x0 in the canonical 2-D
// plane: intersect the three circles centered at the canonical triangle
// vertices (columns of X) with radii ds, then pick the intersection point
// minimizing the baryCoord-weighted absolute residual energy.
// Returns 1.0f on success (x0 set), -1.0f when no finite intersection exists.
float GetVirtualSource(vnl_vector<float> baryCoord, vnl_matrix<float> X, vnl_vector<float> ds, vnl_vector< float > & x0)
{
// vcl_cout<<"X:"<<std::endl<<X.extract(2,3,0,0);
// vcl_cout<<"ds: "<<ds<<std::endl;
// Circles as conics: centered at each canonical vertex, radius = ds[i].
vgl_homg_point_2d<float> centre1(X(0,0), X(1,0),1);
vgl_homg_point_2d<float> centre2(X(0,1), X(1,1),1);
vgl_homg_point_2d<float> centre3(X(0,2), X(1,2),1);
vgl_conic<float> circle1(centre1, ds[0], ds[0], 0.0f);
vgl_conic<float> circle2(centre2, ds[1], ds[1], 0.0f);
vgl_conic<float> circle3(centre3, ds[2], ds[2], 0.0f);
// vcl_cout<<"Circle1: "<<circle1<<std::endl;
// vcl_cout<<"Circle2: "<<circle2<<std::endl;
// vcl_cout<<"Circle3: "<<circle3<<std::endl;
// Pairwise intersections of the three circles.
vcl_list<vgl_homg_point_2d<float> > pts1;
pts1 = vgl_homg_operators_2d<float>::intersection(circle1, circle2);
int n1 = (int) (pts1.size());
vcl_list<vgl_homg_point_2d<float> > pts2;
pts2 = vgl_homg_operators_2d<float>::intersection(circle2, circle3);
int n2 = (int) (pts2.size());
vcl_list<vgl_homg_point_2d<float> > pts3;
pts3 = vgl_homg_operators_2d<float>::intersection(circle1, circle3);
int n3 = (int) (pts3.size());
int n = n1+n2+n3;
// std::cout<<"n= "<<n<<std::endl;
if(n==0)
{
// No intersections at all: signal failure with a sentinel x0.
x0 = vnl_vector<float>(2,-1.0f);
return -1.0f;
}
else
{
// Collect the finite candidate points (columns of xinit); i counts how
// many of the n candidates were actually finite.
vnl_matrix< float > xinit(2,n,0);
int i=0;
typedef vcl_list< vgl_homg_point_2d < float > > container;
vgl_homg_point_2d<float> temp;
for(container::iterator p = pts1.begin(); p!=pts1.end(); p++)
{
// std::cout<<"n1 = "<<n1<<std::endl;
temp = *p;
// Skip points at infinity / NaNs from the homogeneous intersection.
if (!std::isfinite(temp.x()) || !std::isfinite(temp.y()) || !std::isfinite(temp.w())) continue;
// std::cout<<"x: "<<temp.x()<<" y: "<<temp.y()<<" w: "<<temp.w()<<std::endl;
xinit(0,i)=temp.x()/temp.w();
xinit(1,i)=temp.y()/temp.w();
// vcl_cout<<"i= "<<i<<" xinit(i)="<<xinit.get_column(i)<<std::endl;
i++;
}
for(container::iterator p = pts2.begin(); p!=pts2.end(); p++)
{
// std::cout<<"n2 = "<<n2<<std::endl;
temp = *p;
if (!std::isfinite(temp.x()) || !std::isfinite(temp.y()) || !std::isfinite(temp.w())) continue;
// std::cout<<"x: "<<temp.x()<<" y: "<<temp.y()<<" w: "<<temp.w()<<std::endl;
xinit(0,i)=temp.x()/temp.w();
xinit(1,i)=temp.y()/temp.w();
// vcl_cout<<"i= "<<i<<" xinit(i)="<<xinit.get_column(i)<<std::endl;
i++;
}
for(container::iterator p = pts3.begin(); p!=pts3.end(); p++)
{
// std::cout<<"n3 = "<<n3<<std::endl;
temp = *p;
if (!std::isfinite(temp.x()) || !std::isfinite(temp.y()) || !std::isfinite(temp.w())) continue;
// std::cout<<"x: "<<temp.x()<<" y: "<<temp.y()<<" w: "<<temp.w()<<std::endl;
xinit(0,i)=temp.x()/temp.w();
xinit(1,i)=temp.y()/temp.w();
// vcl_cout<<"i= "<<i<<" xinit(i)="<<xinit.get_column(i)<<std::endl;
i++;
}
// All candidates were non-finite: same failure sentinel as n == 0.
if (i==0)
{
x0 = vnl_vector<float>(2,-1.0f);
return -1.0f;
}
// vcl_cout<<"xinit:"<<std::endl<<xinit.extract(2,n,0,0)<<std::endl;
// vcl_cout<<"xinit:"<<std::endl<<xinit.extract(2,i,0,0)<<std::endl;
// Pick the candidate with the smallest weighted absolute residual energy
// sum_j baryCoord[j] * | ||pt - x_j||^2 - ds[j]^2 |.
double minE = 10000000000.0;
int flag = 0;
int winner = -1;
for(int i1=0; i1<i; i1++)
{
double energy = 0.0;
vnl_vector<float> pt = xinit.get_column(i1);
// vcl_cout<<"pt= "<<pt<<std::endl;
for(int j=0; j<3; j++)
{
vnl_vector<float> tmp1 = pt - X.get_column(j);
float residual = std::abs(tmp1.squared_magnitude() - ds[j]*ds[j]); //write the dot product in place of tmp1.*tmp1
energy += (double)(residual*baryCoord[j]);
// float residual = tmp1.squared_magnitude() - ds[j]*ds[j]; //write the dot product in place of tmp1.*tmp1
// energy += (double)(residual*residual*baryCoord[j]);
}
// std::cout<<"Energy: "<<energy<<std::endl;
// Track the minimum-energy candidate (flag marks first iteration).
if(flag==0)
{
minE = energy;
winner = i1;
flag = 1;
}
else
{
if(energy < minE)
{
minE = energy;
winner = i1;
}
}
}
// std::cout<<winner<<std::endl;
x0 = xinit.get_column(winner);
// vcl_cout<<"x0: "<<x0<<std::endl;
return 1.0f;
}
}
/* Praful */
// Compute a 2-D "canonical form" for point s and its containing triangle:
// the triangle vertices are centered on their centroid and projected onto
// the top-2 left singular vectors of the centered vertex matrix, giving
// X (2x3, canonical vertex coordinates) and x (2-vector, canonical
// coordinates of s). Always returns 1.0f.
float ComputeCanonicalForm(point s, vnl_vector<float> & x, vnl_matrix<float> & X)//, Face S)
{
// Locate the triangle containing s (barycentric coords computed but unused).
Face S;
float alpS, betS, gamS;
GetTriangleInfoForPoint(s, S, alpS, betS, gamS);
// S_ holds the triangle's vertex coordinates, one vertex per row initially.
vnl_matrix<float> S_(3,3);
vnl_vector<float> muS(3,0);
for(int i = 0; i < 3; i++) {
point vertex = this->vertices[ S.v[i] ];
for(int j = 0; j < 3; j++) S_(i,j) = (float)(vertex[j]);
}
// std::cout<<"*****************"<<std::endl;
// vcl_cout<<"Face: "<<std::endl<<S_.extract(3,3,0,0)<<std::endl;
// std::cout<<"*****************"<<std::endl;
// After transposing, columns are vertices; rows are x/y/z components.
S_ = S_.transpose();
// std::cout<<"*****************"<<std::endl;
// vcl_cout<<"Transposed: "<<std::endl<<S_.extract(3,3,0,0)<<std::endl;
// std::cout<<"*****************"<<std::endl;
// muS = per-component centroid of the three vertices.
for(int r = 0; r < 3; r++) {
for(int c = 0; c < 3; c++) muS[r] += S_(r,c);
muS[r] /= 3.0f;
}
// std::cout<<"*****************"<<std::endl;
// vcl_cout<<"muS: "<<std::endl<<muS<<std::endl;
// std::cout<<"*****************"<<std::endl;
// Scent = S - muS
vnl_matrix<float> Scent(3,3);
for(int r = 0; r < 3; r++) {
for(int c = 0; c < 3; c++) Scent(r,c) = S_(r,c) - muS[r];
}
// vcl_cout<<"Scent: "<<Scent.extract(3,3,0,0)<<std::endl;
// SVD of the centered vertex matrix; U's leading columns span the
// triangle's plane.
vnl_svd<float> svd(Scent);
// bool vld_svd = vnl_svd<float>::valid();
// std::cout<<"Valid SVD? "<<vld_svd<<std::endl;
// std::cout<<"checkpoint SVD"<<std::endl;
// vnl_diag_matrix<point::value_type> W_ = svd.W();
vnl_matrix<float> U_ = svd.U();
// vcl_cout<<"U_: "<<U_.extract(3,2,0,0)<<std::endl;
// std::cout<<"check32"<<std::endl;
// vnl_matrix<point::value_type> V_ = svd.V();
/* top 2 eigen vectors */
vnl_matrix<float> U(3,2);
for(int r = 0; r < 3; r++) {
for(int c = 0; c < 2; c++) U(r,c) = U_(r,c);
}
// std::cout<<"............................"<<std::endl;
// vcl_cout<<"U: "<<U.extract(2,3,0,0)<<std::endl;
// std::cout<<"............................"<<std::endl;
// Project centered vertices and centered s into the 2-D canonical frame.
/*vnl_matrix<point::value_type>*/ X = U.transpose() * Scent;
vnl_vector<float> sCent(3);
for(int c = 0; c < 3; c++) sCent[c] = s[c] - muS[c];
/*vnl_vector<point::value_type>*/ x = U.transpose() * sCent;
// std::cout<<"-----------------------------"<<std::endl;
// vcl_cout<<x<<std::endl;
// std::cout<<"-----------------------------"<<std::endl;
return 1.0f;
// NOTE: everything below the return above is unreachable debug output.
// printing for debugging
// std::cout<<std::endl<<std::endl<<"Canonical form computed..."<<std::endl;
// vcl_cerr<<x;
// std::cout<<std::endl;
// vcl_cerr<<X.extract(2,3,0,0);
}
// SHIREEN
// Find the triangle containing x and copy its three vertex positions into
// v1, v2, v3 (barycentric coordinates are computed but discarded).
void GetPointTriangleVertices(point x, point & v1, point & v2, point & v3)
{
    Face triangleX;
    float alphaX, betaX, gammaX;
    GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
    point * outVerts[3] = { &v1, &v2, &v3 };
    for (int v = 0; v < 3; v++)
    {
        for (int d = 0; d < 3; d++)
            (*outVerts[v])[d] = this->vertices[triangleX.v[v]][d];
    }
}
// Find the triangle containing x, copy its three vertex positions into
// v1, v2, v3, and report the corresponding vertex indices in vids.
void GetPointTriangleVertices(point x, point & v1, point & v2, point & v3, ivec3 & vids)
{
    Face triangleX;
    float alphaX, betaX, gammaX;
    GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
    point * outVerts[3] = { &v1, &v2, &v3 };
    for (int v = 0; v < 3; v++)
    {
        for (int d = 0; d < 3; d++)
            (*outVerts[v])[d] = this->vertices[triangleX.v[v]][d];
        vids[v] = triangleX.v[v];
    }
}
// Barycentric coordinates of x within its containing triangle; the triangle
// itself is looked up internally and discarded.
void GetPointBarycentricCoordinates(point x, float& alphaX, float& betaX, float& gammaX)
{
    Face containingFace;
    GetTriangleInfoForPoint(x, containingFace, alphaX, betaX, gammaX);
}
// END-SHIREEN
/* Prateep */
// Build an (unsigned) point-to-triangle distance map from a volume of face
// ids, resample it back to the original image grid, and write it as
// "<prefix>.DistMap_r<r>.nrrd". Voxels with fid == -1 get value -1.
void saveFidsDistanceMap(itk::Image<int,3>::Pointer fidsVolume, std::string prefix, double r = 1.0)
{
// Distance image with the same geometry as the fids volume.
itk::Image<PixelType,3>::Pointer distMap = itk::Image<PixelType,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<int,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<PixelType,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
if(fid == -1) {
// No face associated with this voxel: sentinel value.
distMapIt.Set(-1.0);
} else {
// Distance from the voxel's physical position to its associated face.
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, junk;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
double d = this->pointTriangleDistance(p, this->faces[fid], junk);
distMapIt.Set((float) d);
}
++fidsVolumeIt;
++distMapIt;
}
// Resample to the original image grid and write compressed NRRD output.
itk::Image<PixelType,3>::Pointer origDistMap = itk::Image<PixelType,3>::New();
resampleDistanceMap(distMap, origDistMap);
itk::ImageFileWriter< itk::Image<PixelType, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
std::stringstream ss; ss << r;
std::string f = prefix + ".DistMap_r" + ss.str() + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( origDistMap );
w->SetUseCompression(true);
w->Update();
}
/* Prateep */
// shireen: provide the approximate distance transform to fix leaking artifacts that might be caused by sharp corners or irregular triangulation
//void saveFidsSignedDistanceMap(itk::Image<int,3>::Pointer fidsVolume, itk::Image<PixelType, 3>::Pointer scaledDT, std::string prefix, std::string suffix = "", double r = 1.0)
// Build a SIGNED point-to-triangle distance map from a volume of face ids,
// resample it to the original grid, and write "<prefix>.SignedDistMap<suffix>.nrrd".
// Sign is determined by orienting the face normal with the stored vertex
// normal, then testing which side of the face the voxel lies on. Voxels
// with fid == -1 are handled by looking up the containing/nearest triangle
// directly so every voxel receives a distance value.
// NOTE(review): parameter r is currently unused (only referenced in
// commented-out filename code) -- confirm before removing.
void saveFidsSignedDistanceMap(itk::Image<int,3>::Pointer fidsVolume, std::string prefix, std::string suffix = "", double r = 1.0)
{
// Vertex normals are needed for orienting face normals below.
if(normals.empty()) this->need_normals();
// std::string file = prefix + ".normals.ply";
// this->write(file.c_str() );
// Output image with the same geometry as the fids volume.
itk::Image<PixelType,3>::Pointer distMap = itk::Image<PixelType,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
// Cache origin/spacing as floats for the manual index-to-point conversion
// used in the fid == -1 branch.
itk::Image<PixelType,3>::PointType origin = fidsVolume->GetOrigin();
itk::Image<PixelType,3>::SpacingType spacing = fidsVolume->GetSpacing();
float m_origin[3];
m_origin[0] = origin[0];
m_origin[1] = origin[1];
m_origin[2] = origin[2];
float m_spacing[3];
m_spacing[0] = spacing[0];
m_spacing[1] = spacing[1];
m_spacing[2] = spacing[2];
itk::ImageRegionConstIteratorWithIndex< itk::Image<int,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<PixelType,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
// shireen
//itk::ImageRegionIterator< itk::Image<PixelType,3> > approxDistMapIt(scaledDT, scaledDT->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
//approxDistMapIt.GoToBegin();
point tstp;
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
double sign;
double d;
if(fid == -1) {
// distMapIt.Set(-1.0);
/* Prateep :
* - fix DT to get distance values for every voxel.
*/
// No precomputed face id: locate a triangle for this voxel ourselves.
itk::Image<int,3>::IndexType ind_subv = fidsVolumeIt.GetIndex();
VoxelIndexType _ind_subv[3];
_ind_subv[0] = ind_subv[0];
_ind_subv[1] = ind_subv[1];
_ind_subv[2] = ind_subv[2];
point p = this->indexToPhysicalPoint(_ind_subv, m_origin, m_spacing), j;
tstp = p;
float alphaP, betaP, gammaP;
Face triangleP;
int fidP = this->GetTriangleInfoForPoint(p, triangleP, alphaP, betaP, gammaP );
d = this->pointTriangleDistance( p, this->faces[fidP], j);
// (a) get face normal
vec v0 = this->vertices[this->faces[fidP].v[0]];
vec nv0 = this->normals[this->faces[fidP].v[0]];
vec v1 = this->vertices[this->faces[fidP].v[1]];
vec v2 = this->vertices[this->faces[fidP].v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
// Flip the face normal to agree with the stored vertex normal.
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
// (b) get sign
// Sign from which side of the face centroid the voxel lies on.
vec c = (v0 + v1 + v2); c /= 3.f;
float dot2 = (c - p) DOT facenormal;
if(dot2 < 0.0f) sign = -1.0f;
else sign = 1.0f;
} else {
// Precomputed face id: use ITK's index-to-point transform.
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, j;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
d = this->pointTriangleDistance(p, this->faces[fid], j);
tstp = p;
// (a) get face normal
vec v0 = this->vertices[this->faces[fid].v[0]];
vec nv0 = this->normals[this->faces[fid].v[0]];
vec v1 = this->vertices[this->faces[fid].v[1]];
vec v2 = this->vertices[this->faces[fid].v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
// (b) get sign
vec c = (v0 + v1 + v2); c /= 3.f;
float dot2 = (c - p) DOT facenormal;
if(dot2 < 0.0f) sign = -1.0f;
else sign = 1.0f;
}
// if(std::fabs(sign*d + 1.0f) < EPS) {
// std::cout << "bug : " << tstp[0] << ' ' << tstp[1] << ' ' << tstp[2] << std::endl;
// }
distMapIt.Set((float)(sign*d));
// // shireen
// PixelType approx_d = approxDistMapIt.Get();
// double approx_sign;
// if(approx_d < 0.0f) approx_sign = -1.0f;
// else approx_sign = 1.0f;
// if (approx_sign == sign)
// distMapIt.Set((float)(sign*d));
// else
// {
// std::cout << "WARNING: sign doesn't match for fid = " << fid << ", using approximate distance ..." << std::endl;
// distMapIt.Set((float)(approx_d));
// }
++fidsVolumeIt;
++distMapIt;
//++approxDistMapIt;
}
// Resample to the original image grid and write compressed NRRD output.
itk::Image<PixelType,3>::Pointer origDistMap = itk::Image<PixelType,3>::New();
resampleDistanceMap(distMap, origDistMap);
itk::ImageFileWriter< itk::Image<PixelType, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
//std::stringstream ss; ss << r;
//std::stringstream sp; sp << this->imageSpacing[0];
//std::string f = prefix + ".SignedDistMap_r" + ss.str() + "_sp" + sp.str() + ".nrrd";
std::string f = prefix + ".SignedDistMap" + suffix + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( origDistMap );
w->SetUseCompression(true);
w->Update();
}
/* Prateep */
// Resample `img` onto the mesh's stored image grid (this->imageOrigin /
// imageSpacing / imageSize) using cubic B-spline interpolation and an
// identity transform, then deep-copy the result into `output`.
// Logs an error and returns without touching `output` if the stored image
// dimensions are unset.
void resampleDistanceMap(itk::Image<PixelType,3>::Pointer img, itk::Image<PixelType,3>::Pointer output)
{
// Guard: the target grid must have been configured beforehand.
if(this->imageSize[0] == 0 || this->imageSize[1] == 0 || this->imageSize[2] == 0 ||
this->imageSpacing[0] == 0.f || this->imageSpacing[1] == 0.f || this->imageSpacing[2] == 0.f)
{
eprintf("Error!!! invalid image dimensions for resampling\n");
return;
}
typedef itk::ResampleImageFilter< itk::Image<PixelType,3>, itk::Image<PixelType,3> > ResamplerType;
// typedef itk::LinearInterpolateImageFunction< itk::Image<PixelType,3>, double> InterpolatorType;
typedef itk::BSplineInterpolateImageFunction<itk::Image<PixelType,3>, double, double> InterpolatorType;
typedef itk::IdentityTransform< double, 3> TransformType;
TransformType::Pointer identityTransform = TransformType::New();
identityTransform->SetIdentity();
// Cubic B-spline interpolation (order 3).
InterpolatorType::Pointer interpolator = InterpolatorType::New();
interpolator->SetSplineOrder(3);
ResamplerType::Pointer resampler = ResamplerType::New();
resampler->SetTransform( identityTransform );
resampler->SetInterpolator( interpolator );
resampler->SetOutputOrigin( this->imageOrigin );
double spacing[3];
spacing[0] = this->imageSpacing[0];
spacing[1] = this->imageSpacing[1];
spacing[2] = this->imageSpacing[2];
resampler->SetOutputSpacing( spacing);
resampler->SetInput( img );
resampler->SetOutputDirection( img->GetDirection() );
itk::Size<3> size;
size[0] = this->imageSize[0];
size[1] = this->imageSize[1];
size[2] = this->imageSize[2];
resampler->SetSize( size );
resampler->Update();
// inline DeepCopy
// Copy geometry and voxel data from the resampler output into `output`.
output->SetRegions( resampler->GetOutput()->GetLargestPossibleRegion() );
output->Allocate();
output->SetOrigin( resampler->GetOutput()->GetOrigin());
output->SetSpacing( resampler->GetOutput()->GetSpacing());
output->SetDirection( resampler->GetOutput()->GetDirection());
itk::ImageRegionConstIterator < itk::Image<PixelType,3> > inputIt( resampler->GetOutput(), resampler->GetOutput()->GetLargestPossibleRegion() );
itk::ImageRegionIterator < itk::Image<PixelType,3> > outputIt(output, output->GetLargestPossibleRegion() );
while( !inputIt.IsAtEnd() )
{
outputIt.Set( inputIt.Get() );
++inputIt;
++outputIt;
}
}
/* Prateep */
// Build an (unsigned, double-precision) point-to-triangle distance map from
// a supervoxel-derived fids volume and write it directly (no resampling) as
// "<prefix>.fidsSV_distMap_r<r>.nrrd". Voxels with fid == -1 get value -1.
void saveFidsViaSuperVoxelDistanceMap(itk::Image<int,3>::Pointer fidsVolume, std::string prefix, double r = 1.0)
{
// Distance image with the same geometry as the fids volume.
itk::Image<double,3>::Pointer distMap = itk::Image<double,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<int,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<double,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
if(fid == -1) {
// No face associated with this voxel: sentinel value.
distMapIt.Set(-1.0);
} else {
// Distance from the voxel's physical position to its associated face.
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, junk;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
double d = this->pointTriangleDistance(p, this->faces[fid], junk);
distMapIt.Set(d);
}
++fidsVolumeIt;
++distMapIt;
}
// Write the map at the fids volume's native grid, compressed.
itk::ImageFileWriter< itk::Image<double, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<double, 3> >::New();
std::stringstream ss; ss << r;
std::string f = prefix + ".fidsSV_distMap_r" + ss.str() + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( distMap );
w->SetUseCompression(true);
w->Update();
}
/* Prateep */
// Build a SIGNED point-to-triangle distance map from a kd-tree-derived fids
// volume (sign via vertex-normal-oriented face normal, as in
// saveFidsSignedDistanceMap), resample to the original grid, and write
// "<prefix>.fidsKD_distMap.nrrd". Voxels with fid == -1 get value -1.
void saveFidsViaKDtreeDistanceMap(itk::Image<PixelType,3>::Pointer fidsVolume, std::string prefix)
{
// Distance image with the same geometry as the fids volume.
itk::Image<PixelType,3>::Pointer distMap = itk::Image<PixelType,3>::New();
distMap->SetRegions( fidsVolume->GetLargestPossibleRegion() );
distMap->Allocate();
distMap->SetOrigin( fidsVolume->GetOrigin() );
distMap->SetSpacing( fidsVolume->GetSpacing() );
distMap->SetDirection( fidsVolume->GetDirection() );
itk::ImageRegionConstIteratorWithIndex< itk::Image<PixelType,3> > fidsVolumeIt(fidsVolume, fidsVolume->GetLargestPossibleRegion() );
itk::ImageRegionIterator< itk::Image<PixelType,3> > distMapIt(distMap, distMap->GetLargestPossibleRegion() );
fidsVolumeIt.GoToBegin();
distMapIt.GoToBegin();
while(!fidsVolumeIt.IsAtEnd()) {
int fid = fidsVolumeIt.Get();
if(fid == -1) {
// No face associated with this voxel: sentinel value.
distMapIt.Set(-1.0);
} else {
itk::Image<int,3>::IndexType ind = fidsVolumeIt.GetIndex();
itk::Image<int,3>::PointType pp;
fidsVolume->TransformIndexToPhysicalPoint(ind, pp);
point p, junk;
for(int ii = 0; ii < 3; ii++) p[ii] = pp[ii];
double d = this->pointTriangleDistance(p, this->faces[fid], junk);
// (a) get face normal
vec v0 = this->vertices[this->faces[fid].v[0]];
vec nv0 = this->normals[this->faces[fid].v[0]];
vec v1 = this->vertices[this->faces[fid].v[1]];
vec v2 = this->vertices[this->faces[fid].v[2]];
vec facenormal = (v1 - v0) CROSS (v2 - v0);
// Orient the face normal to agree with the stored vertex normal.
float dot1 = facenormal DOT (nv0);
if(dot1 < 0.0f ) facenormal = -facenormal;
// (b) get sign
// Sign from which side of the face centroid the voxel lies on.
vec c = (v0 + v1 + v2); c /= 3.f;
float dot2 = (c - p) DOT facenormal;
double sign;
if(dot2 < 0.0f) sign = -1.0f;
else sign = 1.0f;
distMapIt.Set(sign*d);
}
++fidsVolumeIt;
++distMapIt;
}
// Resample to the original image grid and write compressed NRRD output.
itk::Image<PixelType,3>::Pointer origDistMap = itk::Image<PixelType,3>::New();
resampleDistanceMap(distMap, origDistMap);
itk::ImageFileWriter< itk::Image<PixelType, 3> >::Pointer w =
itk::ImageFileWriter< itk::Image<PixelType, 3> >::New();
std::string f = prefix + ".fidsKD_distMap" + ".nrrd";
w->SetFileName( f.c_str() );
w->SetInput( origDistMap );
w->SetUseCompression(true);
w->Update();
}
// Index of the mesh vertex nearest to pt, found through a lazily constructed
// kd-tree. Falls back to vertex 0 (with a console warning) when no match is
// found within the enlarged search radius.
int FindNearestVertex(point pt)
{
    if ( !kd )
    {
        kd = new KDtree(this->vertices);
    }
    if (maxEdgeLength == 0.0)
    {
        need_maxedgelength();
    }
    // SHIREEN - enlargen the neighborhood size for kdtree to find a match
    const float *nearest = kd->closest_to_pt(pt, 100000.0*sqr(maxEdgeLength));
    if (!nearest)
    {
        std::cout << "failed to find vertex within " << maxEdgeLength << " for point " << pt << ". using vertex 0" << std::endl;
        return 0;
    }
    // Recover the vertex index from the returned pointer's offset into the
    // contiguous vertex array (3 floats per vertex).
    return (nearest - (const float *) &(vertices[0][0])) / 3;
}
// SHIREEN
// Cotangent of the angle at vertex b of the non-degenerate triangle abc:
// cot = cos/sin = (bc . ba) / |bc x ba|, with EPS guarding the denominator.
float cotangent(point a, point b, point c)
{
    vec3 edgeBA = (vec3) (a - b);
    vec3 edgeBC = (vec3) (c - b);
    return (edgeBC DOT edgeBA) / (EPS + len(edgeBC CROSS edgeBA));
}
// Generalized (cotangent-weighted) barycentric coordinates of p in face f;
// assumes p is strictly inside the face. Based on:
// Meyer, Barr, Lee, Desbrun, "Generalized barycentric coordinates on
// irregular polygons", Journal of Graphics Tools 7(1), 2002.
// Small faces are recentered and scaled up before computing the weights to
// reduce numerical instability; the result is normalized to sum to 1.
vec3 ComputeGeneralizedBarycentricCoordinates(point p, Face f)
{
    vec3 bCoords; bCoords.clear();
    point v0,v1,v2;
    v0 = this->vertices[ f.v[0] ];
    v1 = this->vertices[ f.v[1] ];
    v2 = this->vertices[ f.v[2] ];
    point n = (v1 - v0) CROSS (v2 - v0);
    normalize(n);
    // Signed face area (x2) projected on the unit normal.
    float area = ( (v1-v0) CROSS (v2-v0) ) DOT n ;
    if (area < 0.0001) // a small face
    {
        // Recenter on the centroid and scale up to improve conditioning.
        float scale = 1000.0;
        point center = (v0 + v1 + v2);
        center[0] /= 3.0; center[1] /= 3.0; center[2] /= 3.0;
        v0 = v0 - center; v0[0] *= scale; v0[1] *= scale; v0[2] *= scale;
        v1 = v1 - center; v1[0] *= scale; v1[1] *= scale; v1[2] *= scale;
        v2 = v2 - center; v2[0] *= scale; v2[1] *= scale; v2[2] *= scale;
        // BUGFIX: p must undergo the same similarity transform as the
        // vertices; previously it was left in the original frame, so the
        // cotangent weights mixed two coordinate systems. Barycentric
        // coordinates are invariant under a common similarity transform.
        p = p - center; p[0] *= scale; p[1] *= scale; p[2] *= scale;
    }
    point vcur, vprev, vnext;
    vec3 curedge;
    // Weight for vertex i: (cot at p-vcur-vprev + cot at p-vcur-vnext) / |p-vcur|^2.
    // the edge connecting p and v0
    vcur = v0; vprev = v2; vnext = v1;
    curedge = (vec3) (p - vcur);
    bCoords[0] = ( cotangent(p, vcur, vprev) + cotangent(p, vcur, vnext) ) / len2(curedge);
    // the edge connecting p and v1
    vcur = v1; vprev = v0; vnext = v2;
    curedge = (vec3) (p - vcur);
    bCoords[1] = ( cotangent(p, vcur, vprev) + cotangent(p, vcur, vnext) ) / len2(curedge);
    // the edge connecting p and v2
    vcur = v2; vprev = v1; vnext = v0;
    curedge = (vec3) (p - vcur);
    bCoords[2] = ( cotangent(p, vcur, vprev) + cotangent(p, vcur, vnext) ) / len2(curedge);
    // Normalize so the three weights sum to one.
    float sum = bCoords.sum();
    bCoords[0] /= sum;
    bCoords[1] /= sum;
    bCoords[2] /= sum;
    return bCoords;
}
// end SHIREEN
// Barycentric coordinates of p in triangle f, computed as ratios of signed
// sub-triangle areas projected onto the unit face normal.  Unlike
// ComputeBarycentricCoordinates, there is no EPS guard on the denominator
// and no final renormalization.
vec3 ComputeBarycentricCoordinates2(point p, Face f)
{
    vec3 coords; coords.clear();
    point pA = this->vertices[ f.v[0] ];
    point pB = this->vertices[ f.v[1] ];
    point pC = this->vertices[ f.v[2] ];
    point nrm = (pB - pA) CROSS (pC - pA);
    normalize(nrm);
    // twice the signed triangle area, measured along the unit normal
    float denom = ( (pB - pA) CROSS (pC - pA) ) DOT nrm;
    coords[0] = ( ( (pC - pB) CROSS (p - pB) ) DOT nrm ) / denom;
    coords[1] = ( ( (pA - pC) CROSS (p - pC) ) DOT nrm ) / denom;
    coords[2] = ( ( (pB - pA) CROSS (p - pA) ) DOT nrm ) / denom;
    return coords;
}
// Barycentric coordinates of p with respect to triangle f, from signed
// sub-triangle areas projected onto the unit face normal.  The EPS in the
// area denominator guards degenerate faces, and the result is renormalized
// so the three coordinates sum to exactly 1.
vec3 ComputeBarycentricCoordinates(point p, Face f)
{
    vec3 bary; bary.clear();
    point a = this->vertices[ f.v[0] ];
    point b = this->vertices[ f.v[1] ];
    point c = this->vertices[ f.v[2] ];
    point n = (b - a) CROSS (c - a);
    normalize(n);
    // twice the signed face area along the unit normal, with EPS guard
    float area = ( (b-a) CROSS (c-a) ) DOT n ;
    float invArea = 1.0f / (area + EPS);
    bary[0] = ( ( (c - b) CROSS (p - b) ) DOT n ) * invArea;
    bary[1] = ( ( (a - c) CROSS (p - c) ) DOT n ) * invArea;
    bary[2] = ( ( (b - a) CROSS (p - a) ) DOT n ) * invArea;
    // renormalize (sum computed once, then applied to each coordinate)
    float total = bary.sum();
    bary[0] /= total;
    bary[1] /= total;
    bary[2] /= total;
    return bary;
}
// vec3 ComputeBarycentricCoordinates(point p, Face f)
// {
// vec3 bCoords; bCoords.clear();
// point a,b,c;
// a = this->vertices[ f.v[0] ];
// b = this->vertices[ f.v[1] ];
// c = this->vertices[ f.v[2] ];
// point n = (b - a) CROSS (c - a);
// normalize(n);
// float denominator = ( (b - a) CROSS (c - a) ) DOT n;
// // if (denominator < 0.0001) // small face
// // {
// // float scale = 1000.0;
// // point center = (a + b + c);
// // center[0] /= 3.0; center[1] /= 3.0; center[2] /= 3.0;
// // a = a - center; a[0] *= scale; a[1] *= scale; a[2] *= scale;
// // b = b - center; b[0] *= scale; b[1] *= scale; b[2] *= scale;
// // c = c - center; c[0] *= scale; c[1] *= scale; c[2] *= scale;
// // }
// // denominator = ( (b - a) CROSS (c - a) ) DOT n;
// bCoords[0] = ( ( (c - b) CROSS (p - b) ) DOT n ) / (denominator + 1e-7);
// bCoords[1] = ( ( (a - c) CROSS (p - c) ) DOT n ) / (denominator + 1e-7);
// bCoords[2] = ( ( (b - a) CROSS (p - a) ) DOT n ) / (denominator + 1e-7);
// // // Transcribed from Christer Ericson's Real-Time Collision Detection
// // vec3 v0 = b - a, v1 = c - a, v2 = p - a;
// // float d00 = v0 DOT v0;
// // float d01 = v0 DOT v1;
// // float d11 = v1 DOT v1;
// // float d20 = v2 DOT v0;
// // float d21 = v2 DOT v1;
// // float Denom = (d00 * d11 - d01 * d01);
// // float invDenom = 1.0 / (d00 * d11 - d01 * d01);
// // bCoords[1] = (d11 * d20 - d01 * d21) * invDenom;
// // bCoords[2] = (d00 * d21 - d01 * d20) * invDenom;
// // bCoords[0] = 1.0f - bCoords[1] - bCoords[2];
// return bCoords;
// }
/* Prateep */
// True iff p lies within an EPS-band of one of the three edges of face f.
// Per edge, the test compares the squared area of the parallelogram spanned
// by the edge and the (vertex -> p) vector against EPS * |edge|^2.
bool IsCloseToAnEdge(point p, Face f)
{
    point a = this->vertices[ f.v[0] ];
    point b = this->vertices[ f.v[1] ];
    point c = this->vertices[ f.v[2] ];
    // edge BC
    vec3 bc = (vec3)(c - b);
    vec3 areaBC = (vec3)( bc CROSS (vec3)(p - b) );
    if (len2(areaBC) < EPS * len2(bc))
        return true;
    // edge CA
    vec3 ca = (vec3)(a - c);
    vec3 areaCA = (vec3)( ca CROSS (vec3)(p - c) );
    if (len2(areaCA) < EPS * len2(ca))
        return true;
    // edge AB
    vec3 ab = (vec3)(b - a);
    vec3 areaAB = (vec3)( ab CROSS (vec3)(p - a) );
    if (len2(areaAB) < EPS * len2(ab))
        return true;
    return false;
}
// // map< face, ...> didnot work
// /* Prateep */
// void CacheFaceIds() // shireen
// {
// faceids.clear();
// for(unsigned int i = 0; i < this->faces.size(); i++)
// {
// Face f = this->faces[i];
// faceids[f] = i;
// }
// }
// void CacheAreaInvPerTriangle()
// {
// areaInvPerTri.clear();
// for(unsigned int i = 0; i < this->faces.size(); i++)
// {
// Face f = this->faces[i];
// point a,b,c;
// a = this->vertices[ f.v[0] ];
// b = this->vertices[ f.v[1] ];
// c = this->vertices[ f.v[2] ];
// point n = (b-a) CROSS (c-a);
// normalize(n);
// float denominator = ( (b-a) CROSS (c-a) ) DOT n;
// float inv_area = 1.0f / (denominator + EPS);
// areaInvPerTri.push_back((double)inv_area); // shireen
// // // SHIREEN
// // vec3 ab = (vec3) (b-a);
// // vec3 ac = (vec3) (c-a);
// // float area = len(ab CROSS ac)/2.0; // half the area of the parallelogram constructed by ab and ac
// // areaInvPerTri[f] = 1.0f / (area + EPS);
// }
// }
// // SHIREEN
// void CacheAreaPerTriangle()
// {
// areaPerTri.clear();
// for(unsigned int i = 0; i < this->faces.size(); i++)
// {
// Face f = this->faces[i];
// point a,b,c;
// a = this->vertices[ f.v[0] ];
// b = this->vertices[ f.v[1] ];
// c = this->vertices[ f.v[2] ];
// // // shireen
// // vec3 ab = (vec3) (b-a);
// // vec3 ac = (vec3) (c-a);
// // float area = len(ab CROSS ac)/2.0; // half the area of the parallelogram constructed by ab and ac
// // areaPerTri[f] = area;
// point n = (b-a) CROSS (c-a);
// normalize(n);
// float denominator = EPS + (( (b-a) CROSS (c-a) ) DOT n);
// areaPerTri.push_back((double)denominator); // shireen
// //areaPerTri[f] = (double)denominator;
// }
// }
// // end SHIREEN
// Load one per-vertex scalar feature channel from a binary file laid out as
// [uint32 vertex count][count x float].  The channel is appended to
// this->features.  Throws int(1) if the file is missing or the stored count
// does not match this mesh's vertex count.
void ReadFeatureFromFile(const char *infilename)
{
    std::ifstream infile(infilename, std::ios::binary);
    if (!infile.is_open())
    {
        std::cerr << "File Not Found: " << infilename << std::endl;
        throw(1);
    }
    else
    {
        // read # vertices
        unsigned int numVert;
        infile.read(reinterpret_cast<char *>(&numVert), sizeof(unsigned int));
        if ( numVert != this->vertices.size() )
        {
            std::cerr << "size of feature vector does not match # vertices in mesh" << std::endl;
            throw(1);
        }
        else
        {
            vector< float > tmpFeatureVec;
            tmpFeatureVec.reserve(numVert); // one allocation instead of repeated growth
            // loop over vertices; unsigned index matches numVert's type
            // (the original 'int i' triggered signed/unsigned comparison)
            for (unsigned int i = 0; i < numVert; i++)
            {
                float value;
                infile.read( reinterpret_cast<char *>(&value), sizeof(float) );
                tmpFeatureVec.push_back(value);
            }
            this->features.push_back( tmpFeatureVec );
        }
        infile.close();
    }
}
// Load one per-vertex scalar feature channel from a whitespace-separated
// text file and append it to this->features.  Throws int(1) if the file is
// missing or the number of values does not match the mesh's vertex count.
void ReadFeatureFromList(const char *infilename)
{
    std::ifstream infile(infilename);
    if (!infile.is_open())
    {
        std::cerr << "File Not Found" << std::endl;
        throw(1); //exit(1);
    }
    else
    {
        vector< float > tmpFeatureVec;
        float value;
        // BUGFIX: extract-then-test.  The original tested the stream BEFORE
        // reading, pushed a stale duplicate after the last value, and
        // compensated with pop_back() -- undefined behavior on an empty file
        // (pop_back on an empty vector).
        while (infile >> value)
        {
            tmpFeatureVec.push_back(value);
        }
        if ( tmpFeatureVec.size() == this->vertices.size() )
        {
            this->features.push_back( tmpFeatureVec );
        }
        else
        {
            std::cerr << "size of feature vector does not match # vertices in the mesh ! Aborting..." << std::endl;
            throw(1);//exit(1);
        }
        infile.close();
    }
}
/* Praful */
// Load per-vertex feature gradients from a binary file laid out as
// [uint32 vertex count][count x 3 floats] and append them to
// this->featureGradients.  Throws int(1) on a missing file or count mismatch.
void ReadFeatureGradientFromFile(const char *infilename)
{
    // BUGFIX: the stream is consumed with infile.read() (raw bytes), so it
    // must be opened in binary mode, matching ReadFeatureFromFile; text mode
    // corrupts the data on platforms that translate line endings.
    std::ifstream infile(infilename, std::ios::binary);
    if (!infile.is_open())
    {
        std::cerr << "File Not Found" << std::endl;
        throw(1);//exit(1);
    }
    else
    {
        // read # vertices
        unsigned int numVert;
        infile.read(reinterpret_cast<char *>(&numVert), sizeof(unsigned int));
        if ( numVert != this->vertices.size() )
        {
            std::cerr << "size of feature vector does not match # vertices in mesh" << std::endl;
            throw(1); //exit(1);
        }
        else
        {
            vector<point> tempFeatureGradVec;
            tempFeatureGradVec.reserve(numVert);
            // loop over vertices; unsigned index matches numVert's type
            for (unsigned int i = 0; i < numVert; i++)
            {
                // read one 3-component gradient
                point val;
                float value;
                for (int j = 0; j < 3; j ++)
                {
                    infile.read( reinterpret_cast<char *>(&value), sizeof(float) );
                    val[j] = value;
                }
                tempFeatureGradVec.push_back(val);
            }
            this->featureGradients.push_back( tempFeatureGradVec );
        }
        infile.close();
    }
}
// Stub: the binary feature writer below is entirely commented out, so this
// function currently does nothing.  NOTE(review): the commented body also
// contains invalid syntax ("this->(features[featureIndex])[i]") and writes
// unsigned short while features hold float -- fix both if it is revived.
void WriteFeatureToFile(int featureIndex, const char *outfilename)
{
//std::ofstream outfile(outfilename, std::ios::binary);
//// write numVertices to facilitate reading later
//int numVert = this->vertices.size();
//outfile.write( reinterpret_cast<char *>(&numVert), sizeof(int) );
//// loop over each vertex
//for (int i = 0; i < numVert; i++)
//{
// // write distance to curve
// unsigned short value = this->(features[featureIndex])[i];
// outfile.write( reinterpret_cast<char *>(&value), sizeof(unsigned short) );
//}
//outfile.close();
}
/* Praful */
// Interpolate every feature channel at surface point x, using clamped and
// normalized barycentric weights of x inside its containing triangle.
// vals is resized to the number of channels.
void GetFeatureValues(point x, std::vector<float> & vals)
{
    float alphaX, betaX, gammaX;
    Face triangleX;
    GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
    // clamp tiny/negative weights so every vertex keeps a small influence
    if (alphaX < 0.000001f)
        alphaX = 0.000001f;
    if (betaX < 0.000001f)
        betaX = 0.000001f;
    if (gammaX < 0.000001f)
        gammaX = 0.000001f;
    // BUGFIX: normalize with a single precomputed sum.  The original divided
    // alphaX first and then reused the already-modified alphaX when scaling
    // betaX and gammaX, so the weights did not sum to 1.
    const float wsum = alphaX + betaX + gammaX;
    alphaX /= wsum;
    betaX /= wsum;
    gammaX /= wsum;
    vals.resize(this->GetNumberOfFeatures());
    for (unsigned int i = 0; i < this->GetNumberOfFeatures(); i++)
    {
        float f0 = this->features[i][ triangleX.v[0] ];
        float f1 = this->features[i][ triangleX.v[1] ];
        float f2 = this->features[i][ triangleX.v[2] ];
        vals[i] = (alphaX * f0) + (betaX * f1) + (gammaX * f2);
    }
}
/* Prateep */
// Interpolate one feature channel at surface point x, using clamped and
// normalized barycentric weights of x inside its containing triangle.
float GetFeatureValue(point x, int featureIndex)
{
    float alphaX, betaX, gammaX;
    Face triangleX;
    GetTriangleInfoForPoint(x, triangleX, alphaX, betaX, gammaX);
    // SHIREEN: clamp tiny/negative weights
    if (alphaX < 0.000001f)
        alphaX = 0.000001f;
    if (betaX < 0.000001f)
        betaX = 0.000001f;
    if (gammaX < 0.000001f)
        gammaX = 0.000001f;
    // BUGFIX: normalize with a single precomputed sum.  The original divided
    // alphaX first and then reused the modified alphaX in the sums used for
    // betaX and gammaX, so the weights did not sum to 1.
    const float wsum = alphaX + betaX + gammaX;
    alphaX /= wsum;
    betaX /= wsum;
    gammaX /= wsum;
    // interpolate feature values over the triangle face
    float f0 = this->features[featureIndex][ triangleX.v[0] ];
    float f1 = this->features[featureIndex][ triangleX.v[1] ];
    float f2 = this->features[featureIndex][ triangleX.v[2] ];
    return (alphaX * f0) + (betaX * f1) + (gammaX * f2);
}
/* Prateep -- updated Praful */
// Derivative of feature channel fIndex at surface point p, interpolated
// barycentrically from the per-vertex derivatives at the corners of p's
// containing triangle.
point GetFeatureDerivative(point p, int fIndex = 0)
{
    point dP; dP.clear();
    dP[0] = 0.0f; dP[1] = 0.0f; dP[2] = 0.0f;
    float alphaP, betaP, gammaP;
    Face triangleP;
    GetTriangleInfoForPoint(p, triangleP, alphaP, betaP, gammaP);
    // clamp tiny/negative weights so every vertex keeps a small influence
    if (alphaP < 0.000001f)
        alphaP = 0.000001f;
    if (betaP < 0.000001f)
        betaP = 0.000001f;
    if (gammaP < 0.000001f)
        gammaP = 0.000001f;
    // BUGFIX: normalize with a single precomputed sum.  The original divided
    // alphaP first and then recomputed the (now changed) sum for betaP and
    // gammaP, so the weights did not sum to 1.
    const float wsum = alphaP + betaP + gammaP;
    alphaP /= wsum;
    betaP /= wsum;
    gammaP /= wsum;
    // per-vertex derivatives at the triangle corners
    int A = triangleP.v[0];
    int B = triangleP.v[1];
    int C = triangleP.v[2];
    point dA = ComputeFeatureDerivative(A,fIndex);
    point dB = ComputeFeatureDerivative(B,fIndex);
    point dC = ComputeFeatureDerivative(C,fIndex);
    // barycentric interpolation of the corner derivatives
    dP[0] = ( alphaP * dA[0] ) + ( betaP * dB[0] ) + ( gammaP * dC[0] );
    dP[1] = ( alphaP * dA[1] ) + ( betaP * dB[1] ) + ( gammaP * dC[1] );
    dP[2] = ( alphaP * dA[2] ) + ( betaP * dB[2] ) + ( gammaP * dC[2] );
    return dP;
}
// Derivatives of ALL feature channels at surface point p, interpolated
// barycentrically from the per-vertex derivatives at the corners of p's
// containing triangle.  vals is resized to the number of channels.
void GetFeatureDerivativeValues(point p, std::vector<point> & vals)
{
    float alphaP, betaP, gammaP;
    Face triangleP;
    GetTriangleInfoForPoint(p, triangleP, alphaP, betaP, gammaP);
    // clamp tiny/negative weights so every vertex keeps a small influence
    if (alphaP < 0.000001f)
        alphaP = 0.000001f;
    if (betaP < 0.000001f)
        betaP = 0.000001f;
    if (gammaP < 0.000001f)
        gammaP = 0.000001f;
    // BUGFIX: normalize with a single precomputed sum.  The original divided
    // alphaP first and then recomputed the (now changed) sum for betaP and
    // gammaP, so the weights did not sum to 1.
    const float wsum = alphaP + betaP + gammaP;
    alphaP /= wsum;
    betaP /= wsum;
    gammaP /= wsum;
    // triangle corner indices
    int A = triangleP.v[0];
    int B = triangleP.v[1];
    int C = triangleP.v[2];
    vals.resize(this->GetNumberOfFeatures());
    point dP; dP.clear();
    for (unsigned int fIndex = 0; fIndex < this->GetNumberOfFeatures(); fIndex++)
    {
        point dA = ComputeFeatureDerivative(A,fIndex);
        point dB = ComputeFeatureDerivative(B,fIndex);
        point dC = ComputeFeatureDerivative(C,fIndex);
        dP[0] = ( alphaP * dA[0] ) + ( betaP * dB[0] ) + ( gammaP * dC[0] );
        dP[1] = ( alphaP * dA[1] ) + ( betaP * dB[1] ) + ( gammaP * dC[1] );
        dP[2] = ( alphaP * dA[2] ) + ( betaP * dB[2] ) + ( gammaP * dC[2] );
        vals[fIndex] = dP;
        dP.clear();
    }
}
/* Praful */
/* Praful */
// Gradient of a barycentric coordinate over a triangle:
// (edge x n) / (2 * area), where edge is the edge opposite the coordinate's
// vertex and fNorm the face normal.
vec GetGradientBaryCentricCoord(vec fNorm, vec edge, float fArea)
{
    const double twiceArea = 2.0 * fArea; // hoisted common denominator
    vec grad = edge CROSS fNorm;
    grad[0] /= twiceArea;
    grad[1] /= twiceArea;
    grad[2] /= twiceArea;
    return grad;
}
// Convenience overload: area of the fidP-th face of this mesh.
float GetFaceArea(int fidP)
{
return GetFaceArea(this->faces[fidP]);
}
// Triangle area: half the length of the unnormalized face normal, whose
// magnitude equals the area of the parallelogram spanned by two edges.
float GetFaceArea(Face fidP)
{
    vec fn = GetFaceNormal(fidP);
    float lenSq = fn DOT fn;
    return 0.5*std::sqrt(lenSq);
}
// Convenience overload: normal of the fidP-th face of this mesh.
vec GetFaceNormal(int fidP)
{
return GetFaceNormal(this->faces[fidP]);
}
// Unnormalized face normal (length = parallelogram area of two edges),
// flipped if needed so it agrees with the stored vertex normal at v0.
vec GetFaceNormal(Face fidP)
{
    vec p0 = this->vertices[fidP.v[0]];
    vec p1 = this->vertices[fidP.v[1]];
    vec p2 = this->vertices[fidP.v[2]];
    vec fn = (p1 - p0) CROSS (p2 - p0);
    // orient consistently with the per-vertex normal at the first corner
    if ((fn DOT this->normals[fidP.v[0]]) < 0.0f)
        fn = -fn;
    return fn;
}
/* Praful */
// Per-vertex derivative of feature channel nFeature at vertex v.  Returns
// the precomputed gradient when one was loaded; otherwise averages one-sided
// difference quotients over the vertex's 1-ring neighbors.
point ComputeFeatureDerivative(int v,int nFeature = 0)
{
    if (featureGradients.size() > 0)
        return featureGradients[nFeature][v];
    point df; df.clear();
    df[0] = 0.0f; df[1] = 0.0f; df[2] = 0.0f;
    // ROBUSTNESS: an isolated vertex has no neighbors; return a zero
    // derivative instead of computing 0/0 (NaN) below.
    if (this->neighbors[v].empty())
        return df;
    // feature value at v
    float valueV = this->features[nFeature][v];
    point ptV = this->vertices[v];
    // NOTE(review): the per-component 0.0001f offset only guards division by
    // zero; it biases the estimate when a coordinate difference is small --
    // confirm this approximation is acceptable for callers.
    for (unsigned int n = 0; n < this->neighbors[v].size(); n++)
    {
        int indexN = this->neighbors[v][n];
        float valueN = this->features[nFeature][indexN];
        point ptN = this->vertices[indexN];
        float valueDiff = valueN - valueV;
        point ptDiff = ptN - ptV;
        df[0] = df[0] + valueDiff / (ptDiff[0] + 0.0001f);
        df[1] = df[1] + valueDiff / (ptDiff[1] + 0.0001f);
        df[2] = df[2] + valueDiff / (ptDiff[2] + 0.0001f);
    }
    const float numNeighbors = (float) ( this->neighbors[v].size() );
    df[0] = df[0] / numNeighbors;
    df[1] = df[1] / numNeighbors;
    df[2] = df[2] / numNeighbors;
    return df;
}
// Number of per-vertex feature channels currently stored on this mesh.
int GetNumberOfFeatures()
{
    return static_cast<int>(this->features.size());
}
// Debugging printout, controllable by a "verbose"ness parameter
static int verbose;
static void set_verbose(int);
static void (*dprintf_hook)(const char *);
static void set_dprintf_hook(void (*hook)(const char *));
static void dprintf(const char *format, ...);
// Same as above, but fatal-error printout
static void (*eprintf_hook)(const char *);
static void set_eprintf_hook(void (*hook)(const char *));
static void eprintf(const char *format, ...);
// Constructor
// Default-construct an empty mesh: geodesic grid dimensions unset (-1),
// speed field defaults to ONE, and maxEdgeLength/kd are left for lazy
// initialization on first use.
TriMesh() : grid_width(-1), grid_height(-1), flag_curr(0), speedType(ONE), maxEdgeLength(0.0)
{
//iMap = &geoIndex;
//dMap = &geoMap;
kd = NULL; // KD-tree is built on demand in FindNearestVertex()
}
virtual ~TriMesh(){}
};
// Bounding box of b expanded to also contain point p.
inline const TriMesh::BBox operator + (const TriMesh::BBox &b, const point &p)
{
    TriMesh::BBox grown(b);
    grown += p;
    return grown;
}
// Commutative counterpart: point + box yields the same expanded box.
inline const TriMesh::BBox operator + (const point &p, const TriMesh::BBox &b)
{
    TriMesh::BBox grown(b);
    grown += p;
    return grown;
}
// Union of two bounding boxes.
inline const TriMesh::BBox operator + (const TriMesh::BBox &b1, const TriMesh::BBox &b2)
{
    TriMesh::BBox merged(b1);
    merged += b2;
    return merged;
}
#endif
|
deadlock.c | #include <omp.h>
#include <stdio.h>
omp_lock_t A, B;
/* Task 1: lock A, sleep, then lock B.  Together with T2 (which locks B then
 * A) this intentionally forms the classic lock-ordering deadlock this
 * example demonstrates -- do not "fix" the ordering.
 * NOTE(review): sleep() is called without #include <unistd.h>, relying on an
 * implicit declaration -- confirm the intended headers for this demo. */
void T1() {
printf("Task 1: before locking A\n");
omp_set_lock(&A);
printf("Task 1: after locking A\n");
sleep(2);
printf("Task 1: before locking B\n");
omp_set_lock(&B);
printf("Task 1: after locking B\n");
printf("Task 1\n");
omp_unset_lock(&B);
omp_unset_lock(&A);
}
/* Task 2: lock B then A -- the reverse order of T1, completing the
 * circular-wait condition required for the demonstrated deadlock. */
void T2() {
printf("Task 2: before locking B\n");
omp_set_lock(&B);
printf("Task 2: after locking B\n");
printf("Task 2: before locking A\n");
omp_set_lock(&A);
printf("Task 2: after locking A\n");
printf("Task 2\n");
omp_unset_lock(&A);
omp_unset_lock(&B);
}
/* Run T1 and T2 as concurrent OpenMP tasks; with two or more threads they
 * typically deadlock (T1 holds A waiting for B, T2 holds B waiting for A),
 * which is the point of this example. */
int main()
{
    omp_init_lock(&A);
    omp_init_lock(&B);
    #pragma omp parallel
    {
        #pragma omp single
        {
            #pragma omp task
            T1();
            #pragma omp task
            T2();
        }
    }
    /* FIX: release the lock resources (reached only when the tasks happen
     * not to deadlock) and return an explicit status. */
    omp_destroy_lock(&A);
    omp_destroy_lock(&B);
    return 0;
}
|
GB_binop__lt_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int16)
// A*D function (colscale): GB (_AxD__lt_int16)
// D*A function (rowscale): GB (_DxB__lt_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int16)
// C=scalar+B GB (_bind1st__lt_int16)
// C=scalar+B' GB (_bind1st_tran__lt_int16)
// C=A+scalar GB (_bind2nd__lt_int16)
// C=A'+scalar GB (_bind2nd_tran__lt_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_INT16 || GxB_NO_LT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense; the included template applies
// GB_BINOP (z = (x < y)).  Auto-generated: change Generator/*, not this file.
void GB (_Cdense_ewise3_noaccum__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse accumulated into dense).  The accumulation template is
// compiled out here (#if 0), so for this operator the function is a no-op
// that reports success.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_Cdense_accumB__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense).  The accumulation template is
// compiled out here (#if 0), so for this operator the function is a no-op
// that reports success.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_Cdense_accumb__lt_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by diagonal D; Cx is consumed by the included
// template.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_AxD__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by diagonal D; Cx is consumed by the included
// template.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_DxB__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked); the numerical work is in the included
// GB_add_template.c.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_AaddB__lt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read when is_eWiseUnion is true
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) for sparse/hyper C; the work is in
// the included GB_emult_08_meta.c.  Auto-generated: change Generator/*, not
// this file.
GrB_Info GB (_AemultB_08__lt_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.  The
// GB_BINOP_FLIP branch only matters for non-commutative ops without a
// flipped variant.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_AemultB_02__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full; work is
// in the included template.  Auto-generated: change Generator/*, not this
// file.
GrB_Info GB (_AemultB_04__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is bitmap; work is in the
// included template.  Auto-generated: change Generator/*, not this file.
GrB_Info GB (_AemultB_bitmap__lt_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for all present entries of B (Bb is the bitmap of
// present entries), parallelized over p.  Auto-generated: change
// Generator/*, not this file.
GrB_Info GB (_bind1st__lt_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for all present entries of A (Ab is the bitmap of
// present entries), parallelized over p.  Auto-generated: change
// Generator/*, not this file.
GrB_Info GB (_bind2nd__lt_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x < aij) via GB_CAST_OP
// and the included transpose template.  Auto-generated: change Generator/*,
// not this file.
GrB_Info GB (_bind1st_tran__lt_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A while applying z = (aij < y). The loop lives in
// the included GB_unop_transpose.c template, which expands GB_CAST_OP above.
GrB_Info GB (_bind2nd_tran__lt_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// y is the scalar bound to the second operand; read once before the template
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
conv_dilate.c | /*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "MKLDNN.h"
void im2col_cpu(
const float* data_im,
float* data_col,
const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w);
/* Forward pass of a dilated convolution via im2col + GEMM.
 *
 * input:     [N, inC, inH, inW]
 * output:    [N, outC, outH, outW]; seeded with bias (or zero), then the GEMM
 *            result is accumulated on top (beta = 1.0)
 * weight:    [inC*kH*kW, outC]; transposed once up front into weight_t
 *            ([outC, inC*kH*kW]) so the per-batch GEMM is NoTrans/NoTrans
 * input_col: caller-provided scratch, [N, inC*kH*kW, outH*outW]
 * dH/dW: stride, padH/padW: padding, dilH/dilW: dilation.
 *
 * Fixes vs. previous version: the malloc result is checked, the duplicated
 * bias/zero initialization branches are merged, and the unused colH/colW
 * (and the dil_kernel_* values feeding them) are removed. */
void forward_dilated_conv(
    float* input,
    float* output,
    float* weight,
    float* bias, float* input_col,
    int N, int inC, int inH, int inW,
    int kH, int kW,
    int dH, int dW,
    int padH, int padW,
    int dilH, int dilW,
    int outC, int outH, int outW)
{
    const long input_batch_size = inC * inH * inW;
    const long output_batch_size = outC * outH * outW;
    const long map_size = outH * outW;
    /* transpose the weights once so every batch GEMM runs NoTrans/NoTrans */
    const long width = inC*kH*kW;
    float* weight_t = malloc(outC*width*sizeof(float));
    if (weight_t == NULL)
        return; /* allocation failed; leave output untouched */
    mkl_somatcopy('r', 't', width, outC, 1.0, weight, outC, weight_t, width);
    for (int n = 0; n < N; ++n)
    {
        float* input_batch = input + n * input_batch_size;
        float* output_batch = output + n * output_batch_size;
        float* input_col_batch = input_col + n * inC * kH * kW * map_size;
        im2col_cpu(input_batch, input_col_batch,
            inC, inH, inW, kH, kW, padH, padW, dH, dW, dilH, dilW);
        /* seed each output channel with its bias (or 0 when bias == NULL);
           the GEMM below accumulates on top of this (beta = 1.0) */
        for (int c = 0; c < outC; ++c)
        {
            const float init = (bias != NULL) ? bias[c] : 0.0f;
            float* output_ch = output_batch + c * map_size;
            for (long i = 0; i < map_size; ++i)
                output_ch[i] = init;
        }
        cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, outC, map_size, width, 1.0, weight_t,
            width, input_col_batch, map_size, 1.0, output_batch, map_size);
    }
    free(weight_t);
}
void col2im_cpu(const float* data_col, float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w);
/* Backward-data pass of a dilated convolution: gradin = col2im(W * gradout).
 *
 * gradout:    [N, outC, outH, outW] (upstream gradient)
 * gradin:     [N, inC, inH, inW] (result)
 * weight:     [inC*kH*kW, outC]
 * gradin_col: caller-provided scratch, [inC*kH*kW, outH*outW]
 *
 * Fix vs. previous version: removed the unused colH/colW and the
 * dil_kernel_* values that only fed them. */
void backward_dilated_conv(
    float* gradout,
    float* gradin,
    float* weight,
    float* gradin_col,
    int N, int inC, int inH, int inW,
    int kH, int kW,
    int dH, int dW,
    int padH, int padW,
    int dilH, int dilW,
    int outC, int outH, int outW)
{
    const long width = inC * kH * kW;
    const long input_batch_size = inC * inH * inW;
    const long output_batch_size = outC * outH * outW;
    const long map_size = outH * outW;
    for (int n = 0; n < N; ++n)
    {
        /* gradin_col = weight * gradout_batch: [width,outC] x [outC,map] */
        float* gradout_batch = gradout + n * output_batch_size;
        cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, width, map_size, outC, 1.0, weight,
            outC, gradout_batch, map_size, 0.0, gradin_col, map_size);
        /* scatter the column buffer back into image layout (accumulating) */
        float* gradin_batch = gradin + n * input_batch_size;
        col2im_cpu(gradin_col, gradin_batch,
            inC, inH, inW, kH, kW, padH, padW, dH, dW, dilH, dilW);
    }
}
// bdata_dilated_conv(gradout_NCHW, gradbias_ptr, gradweight_ptr, input_column,
/* Backward-filter (and optional bias) pass of a dilated convolution.
 *
 * grad_weight [inC*kH*kW, outC] accumulates, over all batches,
 * input_col_batch x gradout_batch^T (GEMM with beta = 1.0 after zeroing).
 * grad_bias[c] (when non-NULL) is the sum of gradout over batch and spatial
 * positions for output channel c.
 * input_col: [N, inC*kH*kW, outH*outW] as produced by the forward pass.
 *
 * Fixes vs. previous version: the malloc result is checked (before any
 * gradient is modified, so a failure leaves outputs untouched), and the
 * unused colH/colW/dil_kernel_* locals are removed. */
void bfilter_dilated_conv(
    float* gradout,
    float* grad_bias,
    float* grad_weight,
    float* input_col,
    int N, int inC, int inH, int inW,
    int kH, int kW,
    int dH, int dW,
    int padH, int padW,
    int dilH, int dilW,
    int outC, int outH, int outW)
{
    const long width = inC * kH * kW;
    const long input_batch_size = width*outH*outW;
    const long output_batch_size = outC * outH * outW;
    const long output_mapsize = outH * outW;
    /* scratch holding one batch of gradout transposed to [map, outC] */
    float* gradout_batch_t = malloc(output_batch_size * sizeof(float));
    if (gradout_batch_t == NULL)
        return; /* allocation failed; leave gradients untouched */
    /* zero the weight gradient before accumulating over the batch */
    for (long i = 0; i < outC*width; ++i)
        grad_weight[i] = 0;
    for (int n = 0; n < N; ++n)
    {
        float* gradout_batch = gradout + n * output_batch_size;
        float* input_batch = input_col + n * input_batch_size;
        mkl_somatcopy('r', 't', outC, output_mapsize, 1.0, gradout_batch, output_mapsize, gradout_batch_t, outC);
        cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, width, outC, output_mapsize, 1.0, input_batch, output_mapsize,
            gradout_batch_t, outC, 1.0, grad_weight, outC);
    }
    /* bias gradient: plain reduction over batch and spatial dimensions */
    if (grad_bias != NULL)
    {
        for (int i = 0; i < outC; ++i)
            grad_bias[i] = 0;
        for (int b = 0; b < N; ++b)
            for (int c = 0; c < outC; ++c)
                for (long s = 0; s < output_mapsize; ++s)
                    grad_bias[c] += gradout[b * output_batch_size + c * output_mapsize + s];
    }
    free(gradout_batch_t);
}
/*
 * im2col for dilated convolution.
 * Expands data_im [channels, height, width] into data_col
 * [channels*kernel_h*kernel_w, height_col*width_col]. Sample positions that
 * fall into the padding region are written as zero.
 */
void im2col_cpu(
    const float* data_im,
    float* data_col,
    const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w)
{
    const int dil_kernel_h = (kernel_h - 1) * dilation_h + 1;
    const int dil_kernel_w = (kernel_w - 1) * dilation_w + 1;
    const int height_col = (height + 2 * pad_h - dil_kernel_h) / stride_h + 1;
    const int width_col = (width + 2 * pad_w - dil_kernel_w) / stride_w + 1;
    const int channels_col = channels * kernel_h * kernel_w;
    #pragma omp parallel for if (channels_col > 1)
    for (int col_ch = 0; col_ch < channels_col; ++col_ch)
    {
        /* decompose column channel into (image channel, kernel row, kernel col) */
        const int kw = col_ch % kernel_w;
        const int kh = (col_ch / kernel_w) % kernel_h;
        const int im_ch = col_ch / kernel_h / kernel_w;
        const int h_base = kh * dilation_h - pad_h;
        const int w_base = kw * dilation_w - pad_w;
        for (int oh = 0; oh < height_col; ++oh)
        {
            const int ih = oh * stride_h + h_base;
            const int dst_off = (col_ch * height_col + oh) * width_col;
            const int src_off = (im_ch * height + ih) * width;
            for (int ow = 0; ow < width_col; ++ow)
            {
                const int iw = ow * stride_w + w_base;
                const int inside =
                    (ih >= 0) && (ih < height) && (iw >= 0) && (iw < width);
                data_col[dst_off + ow] = inside ? data_im[src_off + iw] : 0.;
            }
        }
    }
}
/*
 * col2im for dilated convolution: the scatter inverse of im2col_cpu.
 * Accumulates data_col [channels*kernel_h*kernel_w, height_col*width_col]
 * back into data_im [channels, height, width]. The image is zeroed first;
 * overlapping receptive-field contributions are summed.
 */
void col2im_cpu(
    const float* data_col,
    float* data_im,
    const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w)
{
    const int dil_patch_h = (kernel_h - 1) * dilation_h + 1;
    const int dil_patch_w = (kernel_w - 1) * dilation_w + 1;
    const int height_col = (height + 2 * pad_h - dil_patch_h) / stride_h + 1;
    const int width_col = (width + 2 * pad_w - dil_patch_w) / stride_w + 1;
    const long chunk_len = kernel_h * kernel_w;
    const long image_len = (long)height * width * channels;
    /* clear the destination: contributions below are accumulated with += */
    #pragma omp parallel for
    for (long i = 0; i < image_len; ++i)
        data_im[i] = 0;
    #pragma omp parallel for if (channels > 1)
    for (int im_ch = 0; im_ch < channels; ++im_ch)
    {
        for (long k = 0; k < chunk_len; ++k)
        {
            const long col_ch = im_ch * chunk_len + k;
            const int kw = (int)(col_ch % kernel_w);
            const int kh = (int)((col_ch / kernel_w) % kernel_h);
            const int h_base = kh * dilation_h - pad_h;
            const int w_base = kw * dilation_w - pad_w;
            for (int oh = 0; oh < height_col; ++oh)
            {
                const int ih = oh * stride_h + h_base;
                if (ih < 0 || ih >= height)
                    continue; /* whole output row maps into padding */
                const long dst_row = ((long)im_ch * height + ih) * width;
                const long src_row = (col_ch * height_col + oh) * width_col;
                for (int ow = 0; ow < width_col; ++ow)
                {
                    const int iw = ow * stride_w + w_base;
                    if (iw >= 0 && iw < width)
                        data_im[dst_row + iw] += data_col[src_row + ow];
                }
            }
        }
    }
}
pw_multithread.c | #include <omp.h>
#include <papi_wrapper.h>
#include <stdio.h>
#include <stdlib.h>
#include "test_lib.h"
int
main()
{
    /* small workload instrumented per-thread via the PAPI wrapper macros */
    long long N = 10;
    double x[N];
    pw_init_instruments;
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();
        pw_start_instruments_loop(tid);
        for (int i = 0; i < N; ++i)
        {
            x[i] = i * 42.3;
            x[i] = i / 29.8;
        }
        pw_stop_instruments_loop(tid);
    }
    pw_print_instruments;
    /* touch the results so the compiler cannot discard the loop above */
    for (int i = 0; i < N; ++i)
    {
        if (i % 1000000 == 0)
        {
            printf("x[%d]\t%f\n", i, x[i]);
        }
    }
    return pw_test_pass(__FILE__);
}
|
equation_groupnorm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#define ALIGNDOWN(N, A) ((N) & ~((A)-1))
#define USE_VECTORIZED_PATH 1
/* Widen a bfloat16 value to fp32: its 16 bits become the high half of an
 * IEEE-754 single and the low half is zero-filled. */
float upconvert_bf16(libxsmm_bfloat16 x) {
  union libxsmm_bfloat16_hp widened;
  widened.i[0] = 0;
  widened.i[1] = x;
  return widened.f;
}
/* Forward group normalization, fp32 path.
 * Layout: pinp/pout are blocked [NP, CP, HW, CB] tensors; pgamma/pbeta are
 * [CP, CB]; mean/var are per-image, per-group outputs of size NP*G.
 * All heavy compute is delegated to pre-built LIBXSMM TPP kernels passed in:
 *  - reduce_HW_kernel: reduces one [HW/num_HW_blocks, CB] tile to 2*CB values
 *    (per-channel sum(X) in [0..CB) and sum(X^2) in [CB..2*CB))
 *  - reduce_rows_kernel / reduce_groups_kernel: collapse channel partial sums
 *    into group sums
 *  - all_zero_kernel / all_zero_G_kernel: zero-fill 2*CB- and G-sized buffers
 *  - add_kernel: elementwise accumulation of tile reductions
 *  - func10: the fused normalization y = ((s*x + b)*gamma + beta)
 * Two paths are selected on group_size = (CP*CB)/G relative to block size CB.
 * NOTE(review): tile offsets use hwb*(HW/num_HW_blocks), which only covers all
 * of HW when HW % num_HW_blocks == 0 — assumed precondition, confirm at caller. */
void tpp_groupnorm_fwd_fp32(long NP, long CP, long HW, long CB, long G, long num_HW_blocks, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout,
libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel,
libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel, libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, float eps) {
LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [NP, CP, HW, CB] */
LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB); /* [CP,CB] */
LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB); /* [CP,CB] */
int np, group_size;
group_size = (CP*CB)/G;
/* Fast path: each group fits inside a single CB channel block, so every
   (np, cp) tile is statistically independent and can be a parallel task. */
if (group_size <= CB){
int cp;
#pragma omp parallel for collapse(2)
for(np = 0; np < NP; np++){
for (cp = 0; cp < CP; cp++){
LIBXSMM_ALIGNED(float tmp[2*CB], 64);
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CB], 64);
LIBXSMM_ALIGNED(float b[CB], 64);
int i, j, hwb, g;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param m_reduce_groups_params, v_reduce_groups_params, reduce_HW_params;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_meltw_binary_param add_param;
libxsmm_matrix_arg arg_array[5];
/* zero the per-channel accumulators (tmp) and per-group sums */
all_zero_param.out.primary = tmp;
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &tmp[CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X;
all_zero_G_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X2;
all_zero_G_kernel(&all_zero_param);
/*************************** Process entire block code *****************************/
LIBXSMM_ALIGNED(float new_tmp[2*CB], 64);
reduce_HW_params.out.primary = new_tmp; /* [2*CB] */
/* accumulate [sum(X), sum(X^2)] per channel over all HW tiles */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW_block, CB] -----> [2 * CB] */
reduce_HW_kernel(&reduce_HW_params);
add_param.in0.primary = tmp;
add_param.in1.primary = new_tmp;
add_param.out.primary = tmp;
add_kernel(&add_param);
add_param.in0.primary = &tmp[CB];
add_param.in1.primary = &new_tmp[CB];
add_param.out.primary = &tmp[CB];
add_kernel(&add_param);
/* for (cb = 0; cb < 2*CB; cb++) { */
/* tmp[cb] += new_tmp[cb]; */
/* } */
}
/* collapse channel sums into group statistics, then derive the per-channel
   scale s and shift b used by the normalization equation */
for(i=0; i < CB; i += group_size){
g = (cp*CB + i)/group_size; /* determine current group */
m_reduce_groups_params.in.primary = &tmp[i];
m_reduce_groups_params.out.primary = &sum_X[g];
v_reduce_groups_params.in.primary = &tmp[CB + i];
v_reduce_groups_params.out.primary = &sum_X2[g];
reduce_groups_kernel(&m_reduce_groups_params);
reduce_groups_kernel(&v_reduce_groups_params);
mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
s[i + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
b[i + j] = -1 * mean[np*G + g] * s[i + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
arg_array[1].primary = s; /* [CB] */
arg_array[2].primary = b; /* [CB] */
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
/* apply the fused normalization equation tile by tile */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
else{ /* Case when group_size > CB */
/* Groups span multiple channel blocks: statistics must be gathered across
   all of CP before normalizing, so parallelism is only over images (np). */
#pragma omp parallel for
for(np = 0; np < NP; np++){
LIBXSMM_ALIGNED(float tmp[2*CB], 64);
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
int i, j, cp, hwb, g;
float m, v;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param m_reduce_rows_params, v_reduce_rows_params, m_reduce_groups_params, v_reduce_groups_params, reduce_HW_params;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_meltw_binary_param add_param;
libxsmm_matrix_arg arg_array[5];
all_zero_param.out.primary = sum_X;
all_zero_G_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X2;
all_zero_G_kernel(&all_zero_param);
LIBXSMM_ALIGNED(float new_tmp[2*CB], 64);
for (cp = 0; cp < CP; cp++){ /* [cp, HW, CB] */
all_zero_param.out.primary = tmp;
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &tmp[CB];
all_zero_kernel(&all_zero_param);
/* for (cb = 0; cb < 2*CB; cb++) { */
/* tmp[cb] = 0.0f; */
/* } */
reduce_HW_params.out.primary = new_tmp; /* [2*CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] -----> [2 * CB] */
reduce_HW_kernel(&reduce_HW_params);
add_param.in0.primary = tmp;
add_param.in1.primary = new_tmp;
add_param.out.primary = tmp;
add_kernel(&add_param);
add_param.in0.primary = &tmp[CB];
add_param.in1.primary = &new_tmp[CB];
add_param.out.primary = &tmp[CB];
add_kernel(&add_param);
/* #pragma omp simd */
/* for (cb = 0; cb < 2*CB; cb++) { */
/* tmp[cb] += new_tmp[cb]; */
/* } */
}
if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
g = (cp*CB)/group_size; /* determine current group */
m_reduce_rows_params.in.primary = tmp;
m_reduce_rows_params.out.primary = &m;
v_reduce_rows_params.in.primary = &tmp[CB];
v_reduce_rows_params.out.primary = &v;
reduce_rows_kernel(&m_reduce_rows_params);
reduce_rows_kernel(&v_reduce_rows_params);
sum_X[g] += m;
sum_X2[g] += v;
}
else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
for(i=0; i < CB; i += group_size){
m_reduce_groups_params.in.primary = &tmp[i];
m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)];
v_reduce_groups_params.in.primary = &tmp[CB + i];
v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)];
reduce_groups_kernel(&m_reduce_groups_params);
reduce_groups_kernel(&v_reduce_groups_params);
}
}
}
for(g = 0; g < G; g++){ /* mean and variance calculation */
mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
/* second sweep: normalize every channel block with its group's s/b */
for (cp = 0; cp < CP; cp++){
arg_array[1].primary = &s[cp*CB]; /* [CB] */
arg_array[2].primary = &b[cp*CB]; /* [CB] */
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */
func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
}
/* Forward group normalization, bf16 path.
 * Mirrors tpp_groupnorm_fwd_fp32: tensors pinp/pout/pgamma/pbeta are bf16,
 * while all statistics (tmp, sum_X, sum_X2, mean, var, s, b) stay in fp32.
 * The up/down conversion is performed inside the supplied TPP kernels
 * (reduce_HW_kernel, func10, ...); this driver only sequences them.
 * NOTE(review): tile offsets use hwb*(HW/num_HW_blocks), which only covers all
 * of HW when HW % num_HW_blocks == 0 — assumed precondition, confirm at caller. */
void tpp_groupnorm_fwd_bf16(long NP, long CP, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pinp, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pbeta, float *mean, float *var,
libxsmm_bfloat16 *pout, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel,
libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel, libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, float eps) {
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, inp, pinp, CP, HW, CB); /* [NP, CP, HW, CB] */
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, out, pout, CP, HW, CB);
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, beta, pbeta, CB);
int np, group_size;
group_size = (CP*CB)/G;
/* Fast path: each group fits in one CB block; (np, cp) tiles independent. */
if (group_size <= CB){
int cp;
#pragma omp parallel for collapse(2)
for(np = 0; np < NP; np++){
for (cp = 0; cp < CP; cp++){
LIBXSMM_ALIGNED(float tmp[2*CB], 64);
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CB], 64);
LIBXSMM_ALIGNED(float b[CB], 64);
int i, j, hwb, g;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param m_reduce_groups_params, v_reduce_groups_params, reduce_HW_params;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_meltw_binary_param add_param;
libxsmm_matrix_arg arg_array[5];
/* zero per-channel accumulators and per-group sums */
all_zero_param.out.primary = tmp;
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &tmp[CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X;
all_zero_G_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X2;
all_zero_G_kernel(&all_zero_param);
/*************************** Process entire block code *****************************/
LIBXSMM_ALIGNED(float new_tmp[2*CB], 64);
reduce_HW_params.out.primary = new_tmp; /* [2*CB] */
/* accumulate [sum(X), sum(X^2)] per channel over all HW tiles */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW_block, CB] -----> [2 * CB] */
reduce_HW_kernel(&reduce_HW_params);
add_param.in0.primary = tmp;
add_param.in1.primary = new_tmp;
add_param.out.primary = tmp;
add_kernel(&add_param);
add_param.in0.primary = &tmp[CB];
add_param.in1.primary = &new_tmp[CB];
add_param.out.primary = &tmp[CB];
add_kernel(&add_param);
/* for (cb = 0; cb < 2*CB; cb++) { */
/* tmp[cb] += new_tmp[cb]; */
/* } */
}
/* group statistics, then per-channel scale s and shift b */
for(i=0; i < CB; i += group_size){
g = (cp*CB + i)/group_size; /* determine current group */
m_reduce_groups_params.in.primary = &tmp[i];
m_reduce_groups_params.out.primary = &sum_X[g];
v_reduce_groups_params.in.primary = &tmp[CB + i];
v_reduce_groups_params.out.primary = &sum_X2[g];
reduce_groups_kernel(&m_reduce_groups_params);
reduce_groups_kernel(&v_reduce_groups_params);
mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
s[i + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
b[i + j] = -1 * mean[np*G + g] * s[i + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
arg_array[1].primary = s; /* [CB] */
arg_array[2].primary = b; /* [CB] */
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
/* apply the fused normalization equation tile by tile */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
else{
/* Groups span multiple channel blocks: gather statistics across all of CP
   before normalizing; parallelism is only over images (np). */
#pragma omp parallel for
for(np = 0; np < NP; np++){
LIBXSMM_ALIGNED(float tmp[2*CB], 64);
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
int i, j, cp, g, hwb;
float m, v;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param m_reduce_rows_params, m_reduce_groups_params, v_reduce_rows_params, v_reduce_groups_params, reduce_HW_params;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_meltw_binary_param add_param;
libxsmm_matrix_arg arg_array[5];
all_zero_param.out.primary = sum_X;
all_zero_G_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X2;
all_zero_G_kernel(&all_zero_param);
LIBXSMM_ALIGNED(float new_tmp[2*CB], 64);
for (cp = 0; cp < CP; cp++){ /* [cp, HW, CB] */
all_zero_param.out.primary = tmp;
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &tmp[CB];
all_zero_kernel(&all_zero_param);
/* #pragma omp simd */
/* for (cb = 0; cb < 2*CB; cb++) { */
/* tmp[cb] = 0.0f; */
/* } */
reduce_HW_params.out.primary = new_tmp; /* [2*CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] -----> [2 * CB] */
reduce_HW_kernel(&reduce_HW_params);
add_param.in0.primary = tmp;
add_param.in1.primary = new_tmp;
add_param.out.primary = tmp;
add_kernel(&add_param);
add_param.in0.primary = &tmp[CB];
add_param.in1.primary = &new_tmp[CB];
add_param.out.primary = &tmp[CB];
add_kernel(&add_param);
/* #pragma omp simd
for (cb = 0; cb < 2*CB; cb++) {
tmp[cb] += new_tmp[cb];
} */
}
if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
g = (cp*CB)/group_size; /* determine current group */
m_reduce_rows_params.in.primary = tmp;
m_reduce_rows_params.out.primary = &m;
v_reduce_rows_params.in.primary = &tmp[CB];
v_reduce_rows_params.out.primary = &v;
reduce_rows_kernel(&m_reduce_rows_params);
reduce_rows_kernel(&v_reduce_rows_params);
sum_X[g] += m;
sum_X2[g] += v;
}
else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
for(i=0; i < CB; i += group_size){
m_reduce_groups_params.in.primary = &tmp[i];
m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)];
v_reduce_groups_params.in.primary = &tmp[CB + i];
v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)];
reduce_groups_kernel(&m_reduce_groups_params);
reduce_groups_kernel(&v_reduce_groups_params);
}
}
}
for(g = 0; g < G; g++){ /* mean and variance calculation */
mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
/* second sweep: normalize every channel block with its group's s/b */
for (cp = 0; cp < CP; cp++){
arg_array[1].primary = &s[cp*CB]; /* [CB] */
arg_array[2].primary = &b[cp*CB]; /* [CB] */
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */
func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
}
/* Backward group normalization, fp32 path.
 * Inputs: upstream gradient pdout and saved forward inputs pinp/mean/var/pgamma,
 * all in the blocked [NP, CP, HW, CB] (tensors) / [CP, CB] (parameters) layout.
 * Outputs: pdin (input gradient), pdgamma/pdbeta (parameter gradients, reduced
 * over NP at the end via the per-image scratch dgamma_NP/dbeta_NP).
 * The TPP equation functions passed in compute, per tile:
 *  - ds_func/db_func: partial sums feeding the group reductions gds/gdb
 *  - dgamma_func/dbeta_func: accumulate per-channel parameter gradients
 *  - din_func: dx = a*gamma*dy + b*x + c once a, b, c are finalized
 * Like the forward pass, the branch on group_size vs CB picks per-(np,cp)
 * parallel tasks (groups inside a block) or per-np tasks (groups span blocks).
 * NOTE(review): add_kernel is accepted but never called in this function.
 * NOTE(review): dgamma_NP/dbeta_NP are stack VLAs of NP*CP*CB floats — large
 * problem sizes may overflow the stack; confirm limits at the call site.
 * NOTE(review): assumes HW % num_HW_blocks == 0, as in the forward pass. */
void tpp_groupnorm_bwd_fp32(long NP, long CP, long HW, long CB, long G, long num_HW_blocks, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta,
libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func, libxsmm_matrix_eqn_function din_func,
libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, float eps) {
int group_size;
group_size = (CP*CB)/G;
const float scale = 1.0f / ((float)group_size * HW);
LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB);
LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB);
LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
if (group_size <= CB){
#pragma omp parallel
{
LIBXSMM_ALIGNED(float a[CB], 64);
LIBXSMM_ALIGNED(float b[CB], 64);
LIBXSMM_ALIGNED(float c[CB], 64);
LIBXSMM_ALIGNED(float ds[CB], 64);
LIBXSMM_ALIGNED(float db[CB], 64);
int np, cp;
#pragma omp for collapse(2)
for (np = 0; np < NP; np++){
for (cp = 0; cp < CP; cp++) {
int j, g, hwb, lg;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_matrix_arg arg_array[10];
eqn_param.inputs = arg_array;
/* for(j = 0; j < CB; j++){
dgamma_NP[np*CP*CB + cp*CB + j] = 0.0f;
dbeta_NP[np*CP*CB + cp*CB + j] = 0.0f;
} */
/* zero per-image parameter-gradient scratch and the ds/db accumulators */
all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = ds;
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = db;
all_zero_kernel(&all_zero_param);
for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute a and b for each channel from group means and variance */
lg = g - (cp*CB)/group_size;
for(j = 0; j < group_size; j++){
a[lg*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
b[lg*group_size + j] = -a[lg*group_size + j]*mean[np*G + g];
}
}
arg_array[1].primary = a;
arg_array[2].primary = b;
arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[8].primary = ds;
arg_array[9].primary = db;
/* first sweep over HW tiles: accumulate ds, db, dgamma, dbeta */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = ds;
ds_func(&eqn_param);
eqn_param.output.primary = db;
db_func(&eqn_param);
eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
dgamma_func(&eqn_param);
eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
dbeta_func(&eqn_param);
}
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute b and c for each channel from group means and variance */
lg = g - (cp*CB)/group_size;
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[lg*group_size + j]; /* Group ds and db calculation */
gdb += db[lg*group_size + j];
}
for(j = 0; j < group_size; j++){
b[lg*group_size + j] = (gdb * mean[np*G + g] - gds) * a[lg*group_size + j] * a[lg*group_size + j] * a[lg*group_size + j] * scale;
c[lg*group_size + j] = -b[lg*group_size + j] * mean[np*G + g] - gdb * a[lg*group_size + j] * scale;
}
}
arg_array[1].primary = a;
arg_array[2].primary = b;
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[7].primary = c;
/* second sweep: produce the input gradient din from a, b, c */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
din_func(&eqn_param);
}
}
}
/* reduce the per-image parameter gradients over NP into dgamma/dbeta */
#pragma omp for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
}
else{
/* groups span multiple channel blocks: one task per image np */
#pragma omp parallel
{
LIBXSMM_ALIGNED(float a[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_ALIGNED(float c[CP*CB], 64);
LIBXSMM_ALIGNED(float ds[CP*CB], 64);
LIBXSMM_ALIGNED(float db[CP*CB], 64);
int np;
#pragma omp for
for (np = 0; np < NP; np++) {
int j, g, cp, hwb;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_matrix_arg arg_array[10];
eqn_param.inputs = arg_array;
/* for(j = 0; j < CP*CB; j++){ */
/* dgamma_NP[np*CP*CB + j] = 0.0f; */
/* dbeta_NP[np*CP*CB + j] = 0.0f; */
/* } */
for (cp = 0; cp < CP; cp++) {
all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &ds[cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &db[cp*CB];
all_zero_kernel(&all_zero_param);
}
for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
for(j = 0; j < group_size; j++){
a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g];
}
}
/* first sweep over all channel blocks: accumulate ds, db, dgamma, dbeta */
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[8].primary = &ds[cp*CB];
arg_array[9].primary = &db[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = &ds[cp*CB];
ds_func(&eqn_param);
eqn_param.output.primary = &db[cp*CB];
db_func(&eqn_param);
eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
dgamma_func(&eqn_param);
eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
dbeta_func(&eqn_param);
}
}
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[g*group_size + j]; /* Group ds and db calculation */
gdb += db[g*group_size + j];
}
for(j = 0; j < group_size; j++){
b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale;
}
}
/* second sweep: produce the input gradient din from a, b, c */
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[7].primary = &c[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
din_func(&eqn_param);
}
}
}
int cp;
/* reduce the per-image parameter gradients over NP into dgamma/dbeta */
#pragma omp for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
}
}
/*
 * Backward pass of group normalization on BF16 activations, driven entirely by
 * pre-JITed libxsmm TPP kernels (matrix-equation functions and the all-zero
 * unary kernel are dispatched by the caller and passed in).
 *
 * Inputs : pdout [NP,CP,HW,CB] upstream gradient, pinp [NP,CP,HW,CB] fwd input,
 *          mean/var [NP,G] fwd statistics, pgamma [CP,CB] scale parameter.
 * Outputs: pdin [NP,CP,HW,CB] input gradient, pdgamma/pdbeta [CP,CB]
 *          parameter gradients (ACCUMULATED into the existing contents).
 *
 * Equation argument slots (fixed by the equations built in main):
 *   0=inp, 1=a, 2=b, 3=dout, 4=dgamma, 5=dbeta, 6=gamma, 7=c, 8=ds, 9=db
 * where a = 1/sqrt(var+eps), b/c are the per-channel din coefficients, and
 * ds/db are the gamma-weighted partial sums reduced per group.
 *
 * NOTE(review): add_kernel is never used in this body — presumably kept for
 * signature parity with the fwd/fp32 variants; confirm before removing.
 */
void tpp_groupnorm_bwd_bf16(long NP, long CP, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pdout, libxsmm_bfloat16 *pinp, float *mean, float *var, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pdin, float *pdgamma, float *pdbeta,
libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func, libxsmm_matrix_eqn_function din_func,
libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, float eps) {
int group_size;
group_size = (CP*CB)/G;
/* 1 / (number of elements that share one group statistic) */
const float scale = 1.0f / ((float)group_size*HW);
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din, pdin, CP, HW, CB);
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, inp, pinp, CP, HW, CB);
LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dout, pdout, CP, HW, CB);
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
/* per-image partial parameter gradients, reduced over NP at the end
 * (NOTE(review): NP*CP*CB-sized stack VLAs — large configs risk stack overflow) */
LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
/* Fast path: a group never spans more than one channel block, so each (np,cp)
 * iteration only needs CB-sized thread-private scratch and the (np,cp) loop
 * nest can be collapsed for parallelism. */
if (group_size <= CB){
#pragma omp parallel
{
/* thread-private per-channel coefficients and partial sums (one block wide) */
LIBXSMM_ALIGNED(float a[CB], 64);
LIBXSMM_ALIGNED(float b[CB], 64);
LIBXSMM_ALIGNED(float c[CB], 64);
LIBXSMM_ALIGNED(float ds[CB], 64);
LIBXSMM_ALIGNED(float db[CB], 64);
int np, cp;
#pragma omp for collapse(2)
for (np = 0; np < NP; np++){
for (cp = 0; cp < CP; cp++) {
int j, g, hwb, lg;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_matrix_arg arg_array[10];
eqn_param.inputs = arg_array;
/* zero the accumulators for this (image, block) pair */
all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = ds;
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = db;
all_zero_kernel(&all_zero_param);
/* a = 1/sqrt(var+eps), b = -mean*a for every channel of the groups that
 * fall inside block cp; lg is the group index local to this block
 * (assumes CB is a multiple of group_size — TODO confirm) */
for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute a and b for each channel from group means and variance */
lg = g - (cp*CB)/group_size;
for(j = 0; j < group_size; j++){
a[lg*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
b[lg*group_size + j] = -a[lg*group_size + j]*mean[np*G + g];
}
}
/* bind the fixed equation arguments, then stream over HW sub-blocks;
 * each equation accumulates into its own output buffer (slots 4/5/8/9) */
arg_array[1].primary = a;
arg_array[2].primary = b;
arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[8].primary = ds;
arg_array[9].primary = db;
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = ds;
ds_func(&eqn_param);
eqn_param.output.primary = db;
db_func(&eqn_param);
eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
dgamma_func(&eqn_param);
eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
dbeta_func(&eqn_param);
}
/* reduce ds/db over each group, then overwrite b and fill c with the
 * per-channel din coefficients */
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = (cp*CB)/group_size; g < ((cp+1)*CB)/group_size; g++){ /* compute b and c for each channel from group means and variance */
lg = g - (cp*CB)/group_size;
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[lg*group_size + j]; /* Group ds and db calculation */
gdb += db[lg*group_size + j];
}
for(j = 0; j < group_size; j++){
b[lg*group_size + j] = (gdb * mean[np*G + g] - gds) * a[lg*group_size + j] * a[lg*group_size + j] * a[lg*group_size + j] * scale;
c[lg*group_size + j] = -b[lg*group_size + j] * mean[np*G + g] - gdb * a[lg*group_size + j] * scale;
}
}
/* din = dout*a*gamma + inp*b + c, streamed over HW sub-blocks */
arg_array[1].primary = a;
arg_array[2].primary = b;
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[7].primary = c;
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
din_func(&eqn_param);
}
}
}
/* reduce the per-image parameter gradients over NP (accumulates onto the
 * caller-provided dgamma/dbeta contents) */
#pragma omp for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
}
/* General path: a group spans multiple channel blocks, so the whole CP*CB
 * channel range must be materialized per image before groups can be reduced;
 * parallelism is over images only. */
else{
#pragma omp parallel
{
/* thread-private coefficients/partial sums for ALL channels of one image */
LIBXSMM_ALIGNED(float a[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_ALIGNED(float c[CP*CB], 64);
LIBXSMM_ALIGNED(float ds[CP*CB], 64);
LIBXSMM_ALIGNED(float db[CP*CB], 64);
int np;
#pragma omp for
for (np = 0; np < NP; np++) {
int j, g, cp, hwb;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_matrix_arg arg_array[10];
eqn_param.inputs = arg_array;
/* zero all per-image accumulators, one CB-wide kernel call at a time */
for (cp = 0; cp < CP; cp++) {
all_zero_param.out.primary = &dgamma_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &dbeta_NP[np*CP*CB + cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &ds[cp*CB];
all_zero_kernel(&all_zero_param);
all_zero_param.out.primary = &db[cp*CB];
all_zero_kernel(&all_zero_param);
}
/* a = 1/sqrt(var+eps), b = -mean*a, laid out channel-major over groups */
for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
for(j = 0; j < group_size; j++){
a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g];
}
}
/* accumulate ds/db/dgamma/dbeta per channel block over all HW sub-blocks */
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[8].primary = &ds[cp*CB];
arg_array[9].primary = &db[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = &ds[cp*CB];
ds_func(&eqn_param);
eqn_param.output.primary = &db[cp*CB];
db_func(&eqn_param);
eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
dgamma_func(&eqn_param);
eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
dbeta_func(&eqn_param);
}
}
/* group-reduce ds/db, then overwrite b and fill c with din coefficients */
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[g*group_size + j]; /* Group ds and db calculation */
gdb += db[g*group_size + j];
}
for(j = 0; j < group_size; j++){
b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale;
}
}
/* din = dout*a*gamma + inp*b + c per channel block and HW sub-block */
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[7].primary = &c[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, np, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
din_func(&eqn_param);
}
}
}
/* reduce per-image parameter gradients over NP into dgamma/dbeta */
int cp;
#pragma omp for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
}
}
void scaler_groupnorm_fwd_fp32(long NP, long CP, long HW, long CB, long G, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps){
LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [NP, CP, HW, CB] */
LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB);
int np, group_size;
group_size = (CP*CB)/G;
#pragma omp parallel for
for(np = 0; np < NP; np++){
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
int i, j, cp, cb, hw, g;
float m, v, value;
for(g = 0; g < G; g++){
sum_X[g] = 0.0f;
sum_X2[g] = 0.0f;
}
for(cp = 0; cp < CP; cp++){ /* Size = CP*HW*CB*4 */
m = 0.0f;
v = 0.0f;
if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
for(cb = 0; cb < CB; cb++){
for(hw = 0; hw < HW; hw++){
value = LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB);
m += value;
v += (value*value);
}
}
g = (cp*CB)/group_size; /* determine current group */
sum_X[g] += m;
sum_X2[g] += v;
}
else{
for(i=0; i < CB; i += group_size){ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
for(j = 0; j < group_size; j++){
for(hw = 0; hw < HW; hw++){
value = LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, (i + j), CP, HW, CB);
sum_X[cp*(CB/group_size) + (i/group_size)] += value;
sum_X2[cp*(CB/group_size) + (i/group_size)] += (value*value);
}
}
}
}
}
for(g = 0; g < G; g++){ /* mean and variance calculation */ /* Size = 2*CP*CB*4 */
mean[np*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*G + g]*mean[np*G + g]); /* var = E[X^2] - (E[X])^2 [G] */
for(j = 0; j < group_size; j++){
s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps)); /* s = 1/sqrt(var(X) + eps) [CP, CB] */
b[g*group_size + j] = -1 * mean[np*G + g] * s[g*group_size + j]; /* b = -E[X]/sqrt(var(X) + eps) [CP, CB] */
}
}
for(cp = 0; cp < CP; cp++){ /* Size = 2*CP*HW*CB*4 + 2*CP*CB*4 */
for(cb = 0; cb < CB; cb++){
for(hw = 0; hw < HW; hw++){
value = LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB);
value = ((value * s[cp*CB + cb]) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + LIBXSMM_VLA_ACCESS(2, beta, cp, cb, CB); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
LIBXSMM_VLA_ACCESS(4, out, np, cp, hw, cb, CP, HW, CB) = value;
}
}
}
} /*End multithreading loop*/
}
/*
 * Scalar (non-TPP) reference backward group normalization in FP32.
 * Consumes pdout [NP,CP,HW,CB], the fwd input pinp, the fwd statistics
 * mean/var [NP,G] and gamma [CP,CB]; produces pdin [NP,CP,HW,CB] and
 * ACCUMULATES parameter gradients into pdgamma/pdbeta [CP,CB].
 * Serves as the correctness baseline for the TPP bwd implementations.
 */
void scaler_groupnorm_bwd_fp32(long NP, long CP, long HW, long CB, long G, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta, float eps) {
int np, group_size;
group_size = (CP*CB)/G;
/* 1 / (elements per group statistic) */
float scale = 1.0f / ((float)group_size * HW);
LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB);
LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB);
LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
/* per-image partial parameter gradients, reduced over NP at the end
 * (NOTE(review): NP*CP*CB-sized stack VLAs — large configs risk stack overflow) */
LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel for
for(np = 0; np < NP; np++){
int j, cp, cb, hw, g;
/* thread-private per-channel coefficients and partial sums for one image */
LIBXSMM_ALIGNED(float a[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_ALIGNED(float c[CP*CB], 64);
LIBXSMM_ALIGNED(float ds[CP*CB], 64);
LIBXSMM_ALIGNED(float db[CP*CB], 64);
for(j = 0; j < CP*CB; j++){
dgamma_NP[np*CP*CB + j] = 0.0f;
dbeta_NP[np*CP*CB + j] = 0.0f;
}
/* a = 1/sqrt(var+eps), b = -mean*a per channel; ds/db start at zero */
for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
for(j = 0; j < group_size; j++){
a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*G + g] + eps));
b[g*group_size + j] = -a[g*group_size + j]*mean[np*G + g];
ds[g*group_size + j] = 0.0f;
db[g*group_size + j] = 0.0f;
}
}
for (cp = 0; cp < CP; cp++) { /* dgamma += (a * inp + b) * dout , dbeta += dout, ds += dout * gamma * inp, db += dout * gamma */ /* Size = 2*CP*HW*CB*4 */
for (cb = 0; cb < CB; cb++) {
for (hw = 0; hw < HW; hw++){
dgamma_NP[np*CP*CB + cp*CB + cb] += (a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB);
dbeta_NP[np*CP*CB + cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB);
ds[cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) * LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB);
db[cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB);
}
}
}
/* reduce ds/db over each group, then OVERWRITE b and fill c with the
 * per-channel din coefficients */
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[g*group_size + j]; /* Group ds and db calculation */
gdb += db[g*group_size + j];
}
for(j = 0; j < group_size; j++){
b[g*group_size + j] = (gdb * mean[np*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
c[g*group_size + j] = -b[g*group_size + j] * mean[np*G + g] - gdb * a[g*group_size + j] * scale;
}
}
for (cp = 0; cp < CP; cp++) { /* din = dout * a * gamma + b * inp + c */ /* Size = 3*CP*HW*CB*4 */
for (cb = 0; cb < CB; cb++) {
for (hw = 0; hw < HW; hw++){
LIBXSMM_VLA_ACCESS(4, din, np, cp, hw, cb, CP, HW, CB) = LIBXSMM_VLA_ACCESS(4, dout, np, cp, hw, cb, CP, HW, CB) * a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + b[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, np, cp, hw, cb, CP, HW, CB) + c[cp*CB + cb];
}
}
}
}
/* reduce the per-image parameter gradients over NP (accumulates onto the
 * caller-provided dgamma/dbeta contents) */
int cp;
#pragma omp parallel for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
int main( int argc, char* argv[] ) {
libxsmm_blasint my_eqn10, my_eqn11, my_eqn12, my_eqn13, my_eqn14, my_eqn15;
libxsmm_matrix_eqn_function func10, func11, func12, func13, func14, func15;
libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
libxsmm_meltw_unary_type unary_type;
libxsmm_meltwfunction_unary reduce_rows_kernel, reduce_HW_kernel, reduce_groups_kernel;
const float eps = FLT_EPSILON;
libxsmm_blasint i, it, ld, tmp_ld, tmp_ld2;
unsigned long long l_start, l_end;
double l_total = 0, l_total2 = 0;
double t_vec = 0, t_tpp = 0;
libxsmm_matdiff_info norms_out;
float *inp, *out, *dinp, *dout, *eqn_dinp, *eqn_dout, *dbeta, *eqn_dbeta, *dgamma, *eqn_dgamma, *eqn_out, *gamma, *beta, *cache_fl, *mean, *var;
libxsmm_bfloat16 *bf16_inp, *bf16_out, *bf16_dinp, *bf16_dout, *bf16_eqn_dinp, *bf16_eqn_dout, *bf16_gamma, *bf16_beta, *bf16_eqn_out;
int NP = 28;
int CP = 2;
int HW = 784;
int CB = 64;
int G = 1;
long num_HW_blocks = 16;
int datatype_mode = 0;
int iters = 100;
libxsmm_datatype in_dt = LIBXSMM_DATATYPE_F32;
libxsmm_datatype out_dt = LIBXSMM_DATATYPE_F32;
if ( argc > 1 ) NP = atoi(argv[1]);
if ( argc > 2 ) CP = atoi(argv[2]);
if ( argc > 3 ) HW = atoi(argv[3]);
if ( argc > 4 ) CB = atoi(argv[4]);
if ( argc > 5 ) G = atoi(argv[5]);
if ( argc > 6 ) num_HW_blocks = atoi(argv[6]);
if ( argc > 7 ) datatype_mode = atoi(argv[7]);
if ( argc > 8 ) iters = atoi(argv[8]);
if (datatype_mode == 0) {
in_dt = LIBXSMM_DATATYPE_F32;
out_dt = LIBXSMM_DATATYPE_F32;
} else if (datatype_mode == 1) {
in_dt = LIBXSMM_DATATYPE_BF16;
out_dt = LIBXSMM_DATATYPE_BF16;
} else {
printf("ERROR: Supporting only FP32 and BF16 precisions...\n");
}
inp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
eqn_dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
eqn_dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
eqn_dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
eqn_dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
gamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
beta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
mean = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*G, 2097152);
var = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*G, 2097152);
eqn_out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*HW*CB, 2097152);
cache_fl = (float*) libxsmm_aligned_malloc( sizeof(float)*1024*1024, 2097152);
bf16_inp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
bf16_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
bf16_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
bf16_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
bf16_eqn_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
bf16_eqn_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
bf16_gamma = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152);
bf16_beta = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152);
bf16_eqn_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*HW*CB, 2097152);
libxsmm_init();
libxsmm_matdiff_clear(&norms_out);
/* Initializing arrays */
for ( i = 0; i < NP*CP*HW*CB; ++i ) {
inp[i] = (float)libxsmm_rng_f64();
out[i] = (float)libxsmm_rng_f64();
eqn_out[i] = out[i];
dinp[i] = (float)libxsmm_rng_f64();
dout[i] = (float)libxsmm_rng_f64();
eqn_dinp[i] = dinp[i];
eqn_dout[i] = dout[i];
libxsmm_rne_convert_fp32_bf16( &inp[i], &bf16_inp[i], 1 );
libxsmm_rne_convert_fp32_bf16( &out[i], &bf16_out[i], 1 );
libxsmm_rne_convert_fp32_bf16( &eqn_out[i], &bf16_eqn_out[i], 1 );
libxsmm_rne_convert_fp32_bf16( &dout[i], &bf16_dout[i], 1 );
libxsmm_rne_convert_fp32_bf16( &eqn_dout[i], &bf16_eqn_dout[i], 1 );
libxsmm_rne_convert_fp32_bf16( &dinp[i], &bf16_dinp[i], 1 );
libxsmm_rne_convert_fp32_bf16( &eqn_dinp[i], &bf16_eqn_dinp[i], 1 );
}
for ( i = 0; i < CP*CB; ++i ) {
gamma[i] = (float)libxsmm_rng_f64();
beta[i] = (float)libxsmm_rng_f64();
dbeta[i] = (float)libxsmm_rng_f64();
dgamma[i] = (float)libxsmm_rng_f64();
eqn_dbeta[i] = dbeta[i];
eqn_dgamma[i] = dgamma[i];
libxsmm_rne_convert_fp32_bf16( &gamma[i], &bf16_gamma[i], 1 );
libxsmm_rne_convert_fp32_bf16( &beta[i], &bf16_beta[i], 1 );
}
for (i = 0; i < 1024 * 1024; i++ ) {
cache_fl[i] = (float)libxsmm_rng_f64();
}
libxsmm_blasint ldo = G;
libxsmm_meltwfunction_unary all_zero_G_kernel = libxsmm_dispatch_meltw_unary(G, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if ( all_zero_G_kernel == NULL) {
fprintf( stderr, "JIT for initialization by unary all zero group copy kernel failed. Bailing...!\n");
exit(-1);
}
ldo = CB;
libxsmm_meltwfunction_unary all_zero_kernel = libxsmm_dispatch_meltw_unary(CB, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if ( all_zero_G_kernel == NULL) {
fprintf( stderr, "JIT for initialization by unary all zero copy kernel failed. Bailing...!\n");
exit(-1);
}
libxsmm_meltwfunction_unary copy_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ldo, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
if ( copy_kernel == NULL) {
fprintf( stderr, "JIT for initialization by copy kernel failed. Bailing...!\n");
exit(-1);
}
/* TPPs for reducing X and X2 in HW*/
ld = CB;
tmp_ld = CB;
unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD;
jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
reduce_HW_kernel = libxsmm_dispatch_meltw_unary(CB, HW/num_HW_blocks, &ld, &tmp_ld, in_dt, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
libxsmm_blasint group_size = (CP*CB)/G;
libxsmm_meltwfunction_binary add_kernel = libxsmm_dispatch_meltw_binary(CB, 1, &ld, &ld, &ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_MELTW_TYPE_BINARY_ADD);
if ( add_kernel == NULL) {
fprintf( stderr, "JIT for initialization of add kernel failed. Bailing...!\n");
exit(-1);
}
/* TPP for reducing groups */
ld = group_size; /* group_size = (CP*CB)/G */
tmp_ld = 1;
unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD;
jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS;
reduce_groups_kernel = libxsmm_dispatch_meltw_unary(group_size, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
ld = CB;
tmp_ld = 1;
unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD;
jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS;
reduce_rows_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
/* TPP for foward */
ld = CB;
tmp_ld = 1;
tmp_ld2 = 1;
my_eqn10 = libxsmm_matrix_eqn_create(); /* y = (s*x + b)*gamma + beta */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32);
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32);
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* x = [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 1, 0, LIBXSMM_DATATYPE_F32 ); /* s = [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b = [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 3, 0, in_dt ); /* gamma = [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 4, 0, in_dt ); /* beta = [CB] */
func10 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, out_dt, my_eqn10 ); /* y = [HW, CB] */
/* Check correctness */
if (datatype_mode == 0) {
scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
tpp_groupnorm_fwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps);
} else if (datatype_mode == 1) {
scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
tpp_groupnorm_fwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps);
for ( i = 0; i < NP*CP*HW*CB; ++i ) {
/* out[i] = upconvert_bf16(bf16_out[i]); */
eqn_out[i] = upconvert_bf16(bf16_eqn_out[i]);
}
}
/* compare */
printf("############################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 FWD Groupnorm - Output #\n");
} else {
printf("# Correctness BF16 FWD Groupnorm - Output #\n");
}
printf("############################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*HW*CB, 1, out, eqn_out, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
if (datatype_mode == 0) {
scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Unit time FWD = %.5g\n", ((double)(l_total)));
tpp_groupnorm_fwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_fwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP groupnorm time FWD = %.5g\n", ((double)(l_total2)));
printf("Speedup FWD is %.5g\n", l_total/l_total2);
} else if (datatype_mode == 1) {
scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_fwd_fp32(NP, CP, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler FP32 groupnorm time FWD = %.5g\n", ((double)(l_total)));
tpp_groupnorm_fwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_fwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel, all_zero_kernel, add_kernel, eps);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP BF16 groupnorm time FWD = %.5g\n", ((double)(l_total2)));
printf("Speedup FWD is %.5g\n", l_total/l_total2);
}
t_tpp = l_total2;
t_vec = l_total;
/* Group norm equations */
/* Create MatEq for bwd layernorm */
ld = CB;
tmp_ld2 = 1;
/* dgamma function */
my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp *a + b) * dout) + dgamma */
libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* dgamma = ((inp *a + b) * dout) + dgamma */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn11, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* ((inp *a + b) * dout) */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn11, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32);
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 4, 0, LIBXSMM_DATATYPE_F32 ); /* dgamma [CB] */
func11 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn11 ); /* dgamma [CB] */
/* dbeta function */
my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [CB] = dout [HW, CB] + dbeta [CB] */
libxsmm_matrix_eqn_push_back_binary_op( my_eqn12, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* dbeta_tmp [HW, CB] */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn12, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, 1, 1, 5, 0, LIBXSMM_DATATYPE_F32 ); /* dbeta [CB] */
func12 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn12 ); /* dbeta [CB] */
/* db new equation */
my_eqn13 = libxsmm_matrix_eqn_create(); /* db [CB] = (dout * gamma) [HW, CB] + db [CB]*/
libxsmm_matrix_eqn_push_back_binary_op(my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* db [CB] */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn13, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_binary_op( my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 9, 0, LIBXSMM_DATATYPE_F32 ); /* db [CB] */
func13 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn13 ); /* db [CB] */
/* ds new equation */
my_eqn14 = libxsmm_matrix_eqn_create(); /* ds [CB] = ((dout * gamma) * inp) [HW, CB] + ds [CB] */
libxsmm_matrix_eqn_push_back_binary_op(my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn14, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 ); /*(dout * gamma)*/
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 8, 0, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */
func14 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn14 ); /* ds [CB] */
/* din equation */
my_eqn15 = libxsmm_matrix_eqn_create(); /* din = ((gamma * a) * dout) + (inp * b + c) */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32);
libxsmm_matrix_eqn_push_back_binary_op( my_eqn15, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32);
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 7, 0, LIBXSMM_DATATYPE_F32 ); /* c [CB] */
func15 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, in_dt, my_eqn15 ); /* din [HW, CB] */
if (datatype_mode == 0) {
scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
tpp_groupnorm_bwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps);
} else if (datatype_mode == 1) {
scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
tpp_groupnorm_bwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps);
for ( i = 0; i < NP*CP*HW*CB; ++i ) {
/* dinp[i] = upconvert_bf16(bf16_dinp[i]); */
eqn_dinp[i] = upconvert_bf16(bf16_eqn_dinp[i]);
}
}
/* compare */
printf("############################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 BWD Groupnorm - Dinput #\n");
} else {
printf("# Correctness BF16 BWD Groupnorm - Dinput #\n");
}
printf("############################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*HW*CB, 1, dinp, eqn_dinp, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
printf("###########################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 BWD Groupnorm - Dbeta #\n");
} else {
printf("# Correctness BF16 BWD Groupnorm - Dbeta #\n");
}
printf("###########################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dbeta, eqn_dbeta, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
printf("############################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 BWD Groupnorm - Dgamma #\n");
} else {
printf("# Correctness BF16 BWD Groupnorm - Dgamma #\n");
}
printf("############################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dgamma, eqn_dgamma, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
if (datatype_mode == 0) {
scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler groupnorm time BWD = %.5g\n", ((double)(l_total)));
tpp_groupnorm_bwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_bwd_fp32(NP, CP, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP groupnorm time BWD = %.5g\n", ((double)(l_total2)));
printf("Speedup BWD is %.5g\n", l_total/l_total2);
} else if (datatype_mode == 1) {
scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_bwd_fp32(NP, CP, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler FP32 groupnorm time BWD = %.5g\n", ((double)(l_total)));
tpp_groupnorm_bwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_bwd_bf16(NP, CP, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, all_zero_kernel, add_kernel, eps);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP BF16 groupnorm time BWD = %.5g\n", ((double)(l_total2)));
printf("Speedup BWD is %.5g\n", l_total/l_total2);
}
/* printf("Running sum is %.5f\n", sum); */
t_tpp += l_total2;
t_vec += l_total;
printf("\n\n=================================\n");
printf("Total Speedup via TPP Matrix equation is %.5g\n", t_vec/t_tpp);
printf("=================================\n");
libxsmm_free(inp);
libxsmm_free(out);
libxsmm_free(dinp);
libxsmm_free(dout);
libxsmm_free(eqn_dinp);
libxsmm_free(eqn_dout);
libxsmm_free(bf16_dinp);
libxsmm_free(bf16_dout);
libxsmm_free(bf16_eqn_dinp);
libxsmm_free(bf16_eqn_dout);
libxsmm_free(dgamma);
libxsmm_free(dbeta);
libxsmm_free(eqn_dgamma);
libxsmm_free(eqn_dbeta);
libxsmm_free(mean);
libxsmm_free(var);
libxsmm_free(gamma);
libxsmm_free(beta);
libxsmm_free(eqn_out);
libxsmm_free(bf16_inp);
libxsmm_free(bf16_out);
libxsmm_free(bf16_gamma);
libxsmm_free(bf16_beta);
libxsmm_free(bf16_eqn_out);
libxsmm_free(cache_fl);
return 0;
}
|
enforce_detgammabar_constraint.h | void enforce_detgammabar_constraint(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *in_gfs) {
/*
 * Rescale the metric perturbations hDD in-place so that the determinant of
 * the conformal metric gammabar matches the reference-metric determinant.
 * NRPy+-generated kernel; loops over every grid point, ghost zones included.
 * NOTE(review): only xx0 and xx1 enter the expressions (coordinates appear
 * spherical-like: xx0 ~ r, xx1 ~ theta -- TODO confirm); the previously
 * loaded-but-unused `xx2` has been removed.
 */
#pragma omp parallel for
for(int i2=0; i2<Nxx_plus_2NGHOSTS[2]; i2++) {
for(int i1=0; i1<Nxx_plus_2NGHOSTS[1]; i1++) {
const REAL xx1 = xx[1][i1];
for(int i0=0; i0<Nxx_plus_2NGHOSTS[0]; i0++) {
const REAL xx0 = xx[0][i0];
/*
 * Step 1: Read the six independent components of the symmetric perturbation hDD.
 */
const double hDD00 = in_gfs[IDX4(HDD00GF, i0,i1,i2)];
const double hDD01 = in_gfs[IDX4(HDD01GF, i0,i1,i2)];
const double hDD02 = in_gfs[IDX4(HDD02GF, i0,i1,i2)];
const double hDD11 = in_gfs[IDX4(HDD11GF, i0,i1,i2)];
const double hDD12 = in_gfs[IDX4(HDD12GF, i0,i1,i2)];
const double hDD22 = in_gfs[IDX4(HDD22GF, i0,i1,i2)];
/*
 * Step 2: Evaluate the SymPy-generated rescaling factor tmp8 = (detghat/detgbar)^(1/3)
 * and write the rescaled components back to main memory.
 */
const double tmp0 = hDD00 + 1;
const double tmp1 = sin(xx1);
const double tmp2 = pow(tmp1, 2);
const double tmp3 = tmp2*pow(xx0, 4);
const double tmp4 = pow(xx0, 2);
const double tmp5 = hDD11*tmp4 + tmp4;
const double tmp6 = tmp2*tmp4;
const double tmp7 = hDD22*tmp6 + tmp6;
/* cbrt(1/detgbar) times |sin(theta)|^(2/3) |r|^(4/3): cube root of the determinant ratio */
const double tmp8 = cbrt(1.0/(-pow(hDD01, 2)*tmp4*tmp7 + 2*hDD01*hDD02*hDD12*tmp3 - pow(hDD02, 2)*tmp5*tmp6 - pow(hDD12, 2)*tmp0*tmp3 + tmp0*tmp5*tmp7))*pow(fabs(tmp1), 2.0/3.0)*pow(fabs(xx0), 4.0/3.0);
in_gfs[IDX4(HDD00GF, i0, i1, i2)] = tmp0*tmp8 - 1;
in_gfs[IDX4(HDD01GF, i0, i1, i2)] = hDD01*tmp8;
in_gfs[IDX4(HDD02GF, i0, i1, i2)] = hDD02*tmp8;
in_gfs[IDX4(HDD11GF, i0, i1, i2)] = tmp8*(hDD11 + 1) - 1;
in_gfs[IDX4(HDD12GF, i0, i1, i2)] = hDD12*tmp8;
in_gfs[IDX4(HDD22GF, i0, i1, i2)] = tmp8*(hDD22 + 1) - 1;
} // END LOOP: for(int i0=0; i0<Nxx_plus_2NGHOSTS[0]; i0++)
} // END LOOP: for(int i1=0; i1<Nxx_plus_2NGHOSTS[1]; i1++)
} // END LOOP: for(int i2=0; i2<Nxx_plus_2NGHOSTS[2]; i2++)
}
|
0fa8731d37e95224dbb06693b0c5599e66d59f4c.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
/* Devito-style data carrier: raw grid storage plus per-dimension size metadata. */
struct dataobj
{
void *restrict data; /* base pointer to the (padded) grid data */
int * size; /* total extent per dimension, padding included */
int * npsize; /* presumably the non-padded extent per dimension -- TODO confirm against the Devito code generator */
int * dsize; /* presumably the domain extent per dimension -- confirm with generator */
int * hsize; /* presumably halo sizes -- confirm */
int * hofs; /* presumably halo offsets -- confirm */
int * oofs; /* presumably owned-region offsets -- confirm */
} ;
/* Accumulated wall-clock timings for each generated code section. */
struct profiler
{
double section0; /* seconds spent in section0, accumulated across calls */
} ;
/*
 * Devito-generated kernel: fill the absorbing-boundary ("abc") padding layers
 * of the velocity model `vp` by replicating a fixed interior plane along each
 * of x, y and z, running the loops on an OpenMP target device.
 * NOTE(review): the constant 12 looks like the halo/padding width, and the
 * source indices 22 / (?_M + 2) look like the first/last interior planes in
 * padded coordinates -- TODO confirm against the generator's parameters.
 * Accumulates elapsed wall time into timers->section0; always returns 0.
 */
int padfunc(struct dataobj *restrict vp_vec, const int x_M, const int y_M, const int z_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int z_m)
{
/* Reinterpret the flat buffer as a 3-D array using the padded extents. */
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
/* Copy the whole field to the device once; results are fetched back at the end. */
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
/* Left x padding: each of the abc_x_l_ltkn planes copies plane x==22. */
for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
{
#pragma omp target teams distribute parallel for collapse(2)
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[abc_x_l + 12][y + 12][z + 12] = vp[22][y + 12][z + 12];
}
}
}
/* Right x padding: replicate the last interior plane vp[x_M + 2]. */
for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
{
#pragma omp target teams distribute parallel for collapse(2)
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[abc_x_r + 12][y + 12][z + 12] = vp[x_M + 2][y + 12][z + 12];
}
}
}
/* y and z padding, done per x-plane; only the outer x loop is offloaded. */
#pragma omp target teams distribute parallel for collapse(1)
for (int x = x_m; x <= x_M; x += 1)
{
for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[x + 12][abc_y_l + 12][z + 12] = vp[x + 12][22][z + 12];
}
}
for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
vp[x + 12][abc_y_r + 12][z + 12] = vp[x + 12][y_M + 2][z + 12];
}
}
for (int y = y_m; y <= y_M; y += 1)
{
for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
{
vp[x + 12][y + 12][abc_z_l + 12] = vp[x + 12][y + 12][22];
}
for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
{
vp[x + 12][y + 12][abc_z_r + 12] = vp[x + 12][y + 12][z_M + 2];
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
/* Pull the padded field back to the host, then release the device copy. */
#pragma omp target update from(vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
#pragma omp target exit data map(release: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
return 0;
}
|
GB_nvec_nonempty.c | //------------------------------------------------------------------------------
// GB_nvec_nonempty: count the number of non-empty vectors
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All pending tuples are ignored. If a vector has all zombies it is still
// counted as non-empty.
#include "GB.h"
GB_PUBLIC
int64_t GB_nvec_nonempty // return # of non-empty vectors
(
const GrB_Matrix A, // input matrix to examine
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (A != NULL) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_PENDING_OK (A)) ;
//--------------------------------------------------------------------------
// trivial cases
//--------------------------------------------------------------------------
if (GB_IS_FULL (A) || GB_IS_BITMAP (A))
{
// A is full or bitmap; nvec_nonempty depends only on the dimensions
return ((A->vlen == 0) ? 0 : A->vdim) ;
}
if (GB_nnz (A) == 0)
{
// A is sparse or hypersparse, with no entries
return (0) ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
int64_t anvec = A->nvec ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// count the non-empty columns
//--------------------------------------------------------------------------
int64_t nvec_nonempty = 0 ;
const int64_t *restrict Ap = A->p ;
int64_t k ;
// vector k is empty iff Ap [k] == Ap [k+1]; zombies still occupy slots in
// Ap, so a vector of all zombies counts as non-empty (see header comment)
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(+:nvec_nonempty)
for (k = 0 ; k < anvec ; k++)
{
if (Ap [k] < Ap [k+1]) nvec_nonempty++ ;
}
ASSERT (nvec_nonempty >= 0 && nvec_nonempty <= A->vdim) ;
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
return (nvec_nonempty) ;
}
|
cluster_main.c | /*! \file cluster_main.c this file has the functions used by the compute cluster process.
*This process retrieves problems from the i/o clusters and returns results
*/
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <assert.h>
#include <HAL/hal/hal.h>
#include <mppaipc.h>
#include <mppa/osconfig.h>
#include <omp.h>
#include "shared_defs.h"
#include "settings.h"
#include "OMPResultVector.h"
#include "OMPHitsAndDoublets.h"
#include "OMPCACell.h"
#include "OMPSimpleVector.h"
#include "OMPMixedList.h"
//! The number of cores per cluster
#define TC 16
OMPResultVector results;
//! Retrieves quadruplets out of connected doublets
/*! This function gets the connected doublets and performs a DFS to produce the quadruplets.
\param doublets The data structure of the hit and doublet data
\param layer The current layer in the search
\param idx The identifier within the layer for the current doublet we are visiting
\param ml The lists of the connections between doublets
\param top An array that holds the indices of the start of every successor list
\param foundNtuplets The result returned for our problem
\param tmpNtuplet The current path of the search
\param minHitsPerNtuplet The number of layers involved
*/
void find_ntuplets(const OMPLayerDoublets* doublets, unsigned int layer, unsigned int idx, MixedList* ml, int** top,
OMPResultVector* foundNtuplets, OMPSimpleVector* tmpNtuplet, const unsigned int minHitsPerNtuplet) {
/* Depth-first search over connected doublets; tmpNtuplet holds one doublet
index per layer pair already traversed. (Removed an unused local `int j`.) */
int4 found;
int otherCell;
if (layer == 0) {
/* Base case: emit a quadruplet (4 hit ids) if the path is long enough. */
if (sizesv(tmpNtuplet) >= minHitsPerNtuplet - 1) {
found.elem[0]=get_inner_hit_id(doublets, tmpNtuplet->m_data[2], 0);
found.elem[1]=get_outer_hit_id(doublets, tmpNtuplet->m_data[2], 0);
found.elem[2]=get_outer_hit_id(doublets, tmpNtuplet->m_data[1], 1);
found.elem[3]=get_outer_hit_id(doublets, tmpNtuplet->m_data[0], 2);
push_backtsrv(foundNtuplets, &found);
}else
return;
} else {
/* Recursive case: walk the successor list of doublet idx, recurse one
layer down for each connected doublet, and backtrack afterwards. */
int ptr = top[layer][idx];
while (ptr != -1) {
int otherIdx = fetch_ml (ml, ptr);
push_backsv(tmpNtuplet, otherIdx);
find_ntuplets(doublets, layer-1, otherIdx, ml, top, foundNtuplets, tmpNtuplet, minHitsPerNtuplet);
pop_backsv(tmpNtuplet, &otherCell);
ptr = next_ml (ml, ptr);
}
}
}
//! everything the CA does is here.
/** The control flow is as follows:
* the process opens the file descriptors for communicating with IO,
* loops so that it can handle the different request (didn't add a termination message) and executes the communication logic.
* It gets info on the size of the problem to receive (communication, also uses sync), allocates space for these layer pairs
* and starts reading. As soon as a layer pair is received, it performs the initialization of its cells. After we get more layer pairs,
* we connect the adjacent ones and finally we find the quadruplets. Then we push the results to IO.
*/
int
main(int argc __attribute__ ((unused)), char *argv[])
{
int rank = 0;
/* fix: status accumulates error bits via |=, so it must start at 0
(it was previously read uninitialized -- undefined behavior). */
int i, j, status = 0, idx1, idx2;
float fargs[6];
const int num_a = 6*sizeof(float)+3*(theNumberOfLayers-1)*sizeof(int);
char args[num_a];
int iargs[3*(theNumberOfLayers-1)];
const char *root_sync = argv[1], *d_portal = argv[2], *a_portal = argv[3];
const char *b_portal = argv[4], *c_portal = argv[5], *e_portal = argv[6];
/*Each cluster contributes a different bit to the root_sync mask.*/
long long mask = (long long)1 << rank;
/*Open the NoC special files.*/
int root_sync_fd = mppa_open(root_sync, O_WRONLY);
int d_portal_fd = mppa_open(d_portal, O_RDONLY);
int a_portal_fd = mppa_open(a_portal, O_RDONLY);
int b_portal_fd = mppa_open(b_portal, O_RDONLY);
int c_portal_fd = mppa_open(c_portal, O_WRONLY);
int e_portal_fd = mppa_open(e_portal, O_RDONLY);
if (root_sync_fd < 0)
printf ("Sync open error\n");
if (a_portal_fd < 0)
printf ("portal error\n");
if (b_portal_fd < 0)
printf ("portal error\n");
if (c_portal_fd < 0)
printf ("portal error\n");
if (d_portal_fd < 0)
printf ("portal error\n");
if (e_portal_fd < 0)
printf ("portal error\n");
OMPLayerDoublets doublets[theNumberOfLayers-1];
/*arena to be managed*/
char* buffer = malloc(1500000);
int fail = 0;
int num[3];
/*initialize statically-sized asynchronous communications*/
mppa_aiocb_t d_portal_aiocb[1] =
{ MPPA_AIOCB_INITIALIZER(d_portal_fd, args, num_a) };
mppa_aiocb_t c_portal_aiocb[1] =
{ MPPA_AIOCB_INITIALIZER(c_portal_fd, &results, sizeof(OMPResultVector)) };
mppa_aiocb_set_pwrite(c_portal_aiocb, &results, sizeof(OMPResultVector), 0);
mppa_aiocb_set_trigger(d_portal_aiocb, 1);
status |= mppa_aio_read(d_portal_aiocb);
for (idx1 = 0; idx1 < 100; idx1++)
for (idx2 = 0; idx2 < 7; idx2++) {
char* origin = buffer;
int left = 1500000;
int flag = 0;
unsigned int s;
/*synchronize with io and gets parameters*/
status |= mppa_write(root_sync_fd, &mask, sizeof(mask));
if (idx1 != 0 || idx2 != 0)
mppa_aio_wait(c_portal_aiocb);
status |= mppa_aio_wait(d_portal_aiocb);
memcpy (fargs, args, 6*sizeof(float));
memcpy (iargs, args+6*sizeof(float), 3*(theNumberOfLayers-1)*sizeof(int));
num[0] = 2*sizeof(int)*iargs[0]+3*sizeof(float)*(iargs[1]+iargs[2]);
num[1] = 2*sizeof(int)*iargs[3]+3*sizeof(float)*iargs[5];
num[2] = 2*sizeof(int)*iargs[6]+3*sizeof(float)*iargs[8];
//printf ("%d: Allocating memory\n", mppa_getpid());
char* l0; char* l1; char* l2;
int* liptr[3];
/*allocate space*/
left -= num[0];
l0 = origin+left; /*allocated at the end of the buffer, is deallocated later*/
l1 = origin;
left -= num[1];
origin += num[1];
l2 = origin;
left -= num[2];
origin += num[2];
s = iargs[3];
liptr[1] = (int*) origin;
left -= s*sizeof(int);
origin += s*sizeof(int);
MixedList* ml = (MixedList*) origin;
left -= sizeof(MixedList);
origin += sizeof(MixedList);
if (left >= 0) {
init_ml(ml, origin, 10000);
left -= 2*10000*sizeof(int);
origin += 2*10000*sizeof(int);
if (left < 0)
printf ("Out of memory: ml\n");
} else {
printf ("Out of memory: ml\n");
}
int* outerptr[2];
MixedList* isOuterHitOfCell[2];
isOuterHitOfCell[0] = (MixedList*) origin;
left -= sizeof(MixedList);
origin += sizeof(MixedList);
if (left >= 0) {
int size = iargs[0]+100;
init_ml(isOuterHitOfCell[0], origin, size);
left -= 2*size*sizeof(int);
origin += 2*size*sizeof(int);
if (left < 0)
printf ("Out of memory: outer\n");
} else {
printf ("Out of memory: outer\n");
}
outerptr[0] = (int*) origin;
left -= iargs[2]*sizeof(int);
origin += iargs[2]*sizeof(int);
if (left >= 0) {
for (i = 0; i < iargs[2]; i++)
outerptr[0][i] = -1;
}
isOuterHitOfCell[1] = (MixedList*) origin;
left -= sizeof(MixedList);
origin += sizeof(MixedList);
if (left >= 0) {
int size = iargs[3]+100;
init_ml(isOuterHitOfCell[1], origin, size);
left -= 2*size*sizeof(int);
origin += 2*size*sizeof(int);
if (left < 0)
printf ("Out of memory: outer\n");
} else {
printf ("Out of memory: outer\n");
}
outerptr[1] = (int*) origin;
left -= iargs[5]*sizeof(int);
origin += iargs[5]*sizeof(int);
if (left >= 0) {
for (i = 0; i < iargs[5]; i++)
outerptr[1][i] = -1;
}
int layer;
int top, val;
int thisCell[3];
int otherCell[3];
left -= 2*iargs[0]*sizeof(float);
doublets[0].r = (float*) (origin+left);
if (left < 0) {
printf ("Out of memory: r1\n");
}
doublets[1].r = (float*) origin;
left -= 2*iargs[3]*sizeof(float);
if (left < 0) {
printf ("Out of memory: r2\n");
}
origin += 2*iargs[3]*sizeof(float);
/*initialize hit and doublet receives
get each layer in a different communication
the idea is that we will wait for each layer just before we process it hiding other communications*/
mppa_aiocb_t l0_portal_aiocb[1] =
{ MPPA_AIOCB_INITIALIZER(a_portal_fd, l0, num[0]) };
mppa_aiocb_t l1_portal_aiocb[1] =
{ MPPA_AIOCB_INITIALIZER(b_portal_fd, l1, num[1]) };
mppa_aiocb_t l2_portal_aiocb[1] =
{ MPPA_AIOCB_INITIALIZER(e_portal_fd, l2, num[2]) };
mppa_aiocb_set_trigger(l0_portal_aiocb, 1);
status |= mppa_aio_read(l0_portal_aiocb);
mppa_aiocb_set_trigger(l1_portal_aiocb, 1);
status |= mppa_aio_read(l1_portal_aiocb);
mppa_aiocb_set_trigger(l2_portal_aiocb, 1);
status |= mppa_aio_read(l2_portal_aiocb);
/*unlock the writes of io*/
status |= mppa_write(root_sync_fd, &mask, sizeof(mask));
status |= mppa_aio_wait(l0_portal_aiocb);
doublets[0].size = iargs[0];
doublets[0].indices = (int*) l0;
l0 += 2*sizeof(int)*iargs[0];
doublets[0].layers[0].size = iargs[1];
doublets[0].layers[0].p = (float*) l0;
l0 += 3*sizeof(float)*iargs[1];
doublets[0].layers[1].size = iargs[2];
doublets[0].layers[1].p = (float*) l0;
/*Create layerpair 0-1: cache hit radii and index doublets by outer hit*/
if (left >= 0) {
#pragma omp parallel for num_threads(TC)
for (j = 0; j < doublets[0].size; j++) {
int inner = get_inner_hit_id (doublets, j, 0);
float x = get_inner_x (doublets, inner, 0);
float y = get_inner_y (doublets, inner, 0);
doublets[0].r[2*j] = hypot(x,y);
int outer = get_outer_hit_id (doublets, j, 0);
x = get_outer_x (doublets, outer, 0);
y = get_outer_y (doublets, outer, 0);
doublets[0].r[2*j+1] = hypot(x,y);
push_back_mlts (isOuterHitOfCell[0], &outerptr[0][outer], j);
}
}
status |= mppa_aio_wait(l1_portal_aiocb);
doublets[1].size = iargs[3];
doublets[1].indices = (int*) l1;
l1 += 2*sizeof(int)*iargs[3];
doublets[1].layers[0].size = doublets[0].layers[1].size;
doublets[1].layers[0].p = doublets[0].layers[1].p;
doublets[1].layers[1].size = iargs[5];
doublets[1].layers[1].p = (float*) l1;
/*Create layerpair 1-2*/
if (left >= 0) {
#pragma omp parallel for num_threads(TC)
for (j = 0; j < doublets[1].size; j++) {
int inner = get_inner_hit_id (doublets, j, 1);
float x = get_inner_x (doublets, inner, 1);
float y = get_inner_y (doublets, inner, 1);
doublets[1].r[2*j] = hypot(x,y);
int outer = get_outer_hit_id (doublets, j, 1);
x = get_outer_x (doublets, outer, 1);
y = get_outer_y (doublets, outer, 1);
doublets[1].r[2*j+1] = hypot(x,y);
push_back_mlts (isOuterHitOfCell[1], &outerptr[1][outer], j);
}
}
/*connect 0-1-2*/
if (left >= 0) {
s = doublets[1].size;
#pragma omp parallel for num_threads(TC) private(j, top, val, thisCell, otherCell)
for (i = 0; i < s; i++) {
top = -1;
int inner = get_inner_hit_id(doublets, i, 1);
thisCell[0] = i;
thisCell[1] = inner;
thisCell[2] = get_outer_hit_id(doublets, i, 1);
int optr = outerptr[0][inner];
/*loop through doublets sharing hit*/
while (optr != -1) {
otherCell[0] = fetch_ml (isOuterHitOfCell[0], optr);
otherCell[1] = get_inner_hit_id(doublets, otherCell[0], 0);
otherCell[2] = get_outer_hit_id(doublets, otherCell[0], 0);
if (check_alignment_and_tag(doublets, thisCell, 1, otherCell,
fargs[0], fargs[1], fargs[2],
fargs[3], fargs[4], fargs[5])) {
val = otherCell[0];
top = push_back_ml (ml, top, val);
if (top < 0) {
printf ("Error: out of space\n");
results.m_size = -1;
left = -1;
break;
}
}
optr = next_ml (isOuterHitOfCell[0], optr);
}
liptr[1][i] = top;
}
}
status |= mppa_aio_wait(l2_portal_aiocb);
doublets[2].size = iargs[6];
doublets[2].indices = (int*) l2;
l2 += 2*sizeof(int)*iargs[6];
doublets[2].layers[0].size = doublets[1].layers[1].size;
doublets[2].layers[0].p = doublets[1].layers[1].p;
doublets[2].layers[1].size = iargs[8];
doublets[2].layers[1].p = (float*) l2;
if (left < 0) {
flag = 1;
results.m_size = -1;
goto res;
}
/*reclaim the region that held l0 at the end of the buffer*/
left += num[0];
left += 2*iargs[0]*sizeof(float);
doublets[2].r = (float*) origin;
left -= 2*iargs[6]*sizeof(float);
origin += 2*iargs[6]*sizeof(float);
s = iargs[6];
liptr[2] = (int*) origin;
left -= s*sizeof(int);
origin += s*sizeof(int);
if (left < 0) {
printf ("Out of memory: r3\n");
flag = 1;
results.m_size = -1;
goto res;
}
/*Create layerpair 2-3*/
#pragma omp parallel for num_threads(TC)
for (j = 0; j < doublets[2].size; j++) {
int inner = get_inner_hit_id (doublets, j, 2);
float x = get_inner_x (doublets, inner, 2);
float y = get_inner_y (doublets, inner, 2);
doublets[2].r[2*j] = hypot(x,y);
int outer = get_outer_hit_id (doublets, j, 2);
x = get_outer_x (doublets, outer, 2);
y = get_outer_y (doublets, outer, 2);
doublets[2].r[2*j+1] = hypot(x,y);
}
if (left < 0) {
flag = 1;
results.m_size = -1;
goto res;
}
/*connect 1-2-3*/
for (layer = 2; layer < theNumberOfLayers-1; layer++) {
s = doublets[layer].size;
#pragma omp parallel for num_threads(TC) private(j, top, val, thisCell, otherCell)
for (i = 0; i < s; i++) {
top = -1;
int inner = get_inner_hit_id(doublets, i, layer);
thisCell[0] = i;
thisCell[1] = inner;
thisCell[2] = get_outer_hit_id(doublets, i, layer);
int optr = outerptr[layer-1][inner];
while (optr != -1) {
otherCell[0] = fetch_ml (isOuterHitOfCell[layer-1], optr);
otherCell[1] = get_inner_hit_id(doublets, otherCell[0], layer-1);
otherCell[2] = get_outer_hit_id(doublets, otherCell[0], layer-1);
if (check_alignment_and_tag(doublets, thisCell, layer, otherCell,
fargs[0], fargs[1], fargs[2],
fargs[3], fargs[4], fargs[5])) {
val = otherCell[0];
top = push_back_ml (ml, top, val);
if (top < 0) {
printf ("Error: out of space\n");
results.m_size = -1;
flag = 1;
break;
}
}
optr = next_ml (isOuterHitOfCell[layer-1], optr);
}
liptr[layer][i] = top;
}
if (flag == 1)
goto res;
}
/* fix: was `numberOfLayers`, an identifier used nowhere else in this file;
every other reference uses theNumberOfLayers. */
unsigned int lastLayerPairIndex = theNumberOfLayers - 2;
resetrv(&results);
OMPSimpleVector stack;
/*get the quadruplets*/
s = doublets[lastLayerPairIndex].size;
#pragma omp parallel for num_threads(TC) private(stack)
for (i = 0; i < s; i++) {
resetsv(&stack);
push_backsv(&stack, i);
find_ntuplets(doublets, lastLayerPairIndex, i, ml, liptr, &results, &stack, 4);
}
res:
if (results.m_size == -1)
fail++;
/*starts sending results and getting new parameters*/
mppa_aiocb_set_trigger(d_portal_aiocb, 1);
status |= mppa_aio_read(d_portal_aiocb);
status |= mppa_pwrite(c_portal_fd, &results, sizeof(OMPResultVector), 0);
mppa_aio_write(c_portal_aiocb);
}
printf ("Failed to compute: %d\n", fail);
mppa_exit(0);
return 0;
}
|
dataset.h | #ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
* e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
* The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist);
* the weight for the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1]-1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If it exists, the model will boost from this score, otherwise it will start from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of data
* \param initscore_file Filename of initial score
*/
void Init(const char* data_filename, const char* initscore_file);
/*!
* \brief init as subset
* \param metadata Filename of data
* \param used_indices
* \param num_used_indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
* \brief Initial with binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
* \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
* \param num_data Number of training data
* \param weight_idx Index of weight column, < 0 means doesn't exists
* \param query_idx Index of query id column, < 0 means doesn't exists
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indice of local used
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
* \brief Partition meta data according to local used indices if need
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
void SetLabel(const label_t* label, data_size_t len);
void SetWeights(const label_t* weights, data_size_t len);
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
* \param file File want to write
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value)
{
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value)
{
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value)
{
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
* \brief Get weights, if not exists, will return nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get data boundaries on queries, if not exists, will return nullptr
* we assume data will order by query,
* the interval of [query_boundaris[i], query_boundaris[i+1])
* is the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
* \brief Get weights for queries, if not exists, will return nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get initial scores, if not exists, will return nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore(const char* initscore_file);
/*! \brief Load wights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
/*! \brief Load query wights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correct weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
/*! \brief Number of querys */
data_size_t num_queries_;
/*! \brief Number of Initial score, used to check correct weight file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
/*! \brief mutex for threading safe call */
std::mutex mutex_;
bool weight_load_from_file_;
bool query_load_from_file_;
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, store in (column_idx, values)
* \param out_label Label will store to this if exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
/*! \brief Get the total number of columns of the data */
virtual int TotalColumns() const = 0;
/*!
* \brief Create an object of parser, will auto-choose the format depending on the file
* \param filename One Filename of data
* \param header True if the data file contains a header line
* \param num_features Pass num_features of this data file if you know, <=0 means don't know
* \param label_idx index of label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
* which is used for training or validation
*/
class Dataset {
public:
friend DatasetLoader;
LIGHTGBM_EXPORT Dataset();
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
/*! \brief Construct feature groups from the given bin mappers and sample statistics */
void Construct(
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
int** sample_non_zero_indices,
const int* num_per_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
/*! \brief Check whether another dataset has the same feature count, label
* index, and bin layout as this one */
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
/*! \brief Push one dense row of raw feature values into the bin data
* (no-op once loading is finished) */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
/*! \brief Push one sparse row of (column_idx, value) pairs into the bin data
* (no-op once loading is finished) */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
/*! \brief Push a single value for the given group/sub-feature */
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
/*! \brief Map inner (used) feature index to the original column index */
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
/*! \brief Map original column index to inner (used) feature index; negative if the column is unused */
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
/*! \brief Get the feature-group index of a feature */
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
/*! \brief Get the sub-feature index of a feature inside its group.
* NOTE(review): the name keeps a historical typo ("Feture"); renaming would break callers. */
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
/*! \brief Get the starting bin boundary of a feature group */
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
/*! \brief Get the total number of bins over all groups */
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
/*! \brief Column indices that map to a used feature */
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
void ReSize(data_size_t num_data);
void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
HistogramBinEntry* histogram_data) const;
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
HistogramBinEntry* data) const;
/*! \brief Split the data indices of one leaf by a bin threshold on the given feature */
inline data_size_t Split(int feature,
const uint32_t* threshold, int num_threshold, bool default_left,
data_size_t* data_indices, data_size_t num_data,
data_size_t* lte_indices, data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
}
/*! \brief Bin offset of feature i inside its group: 1 for the first
* sub-feature of a group, 0 otherwise */
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
/*! \brief Number of bins of feature i */
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
/*! \brief Monotone constraint type of feature i (0 when no constraints are set) */
inline int8_t FeatureMonotone(int i) const {
if (monotone_types_.empty()) {
return 0;
} else {
return monotone_types_[i];
}
}
/*! \brief Penalty of feature i (1 when no penalties are set).
* NOTE(review): the name keeps a historical typo ("Penalte"); renaming would break callers. */
inline double FeaturePenalte(int i) const {
if (feature_penalty_.empty()) {
return 1;
} else {
return feature_penalty_[i];
}
}
/*! \brief True if any feature has a non-zero monotone constraint */
bool HasMonotone() const {
if (monotone_types_.empty()) {
return false;
} else {
for (size_t i = 0; i < monotone_types_.size(); ++i) {
if (monotone_types_[i] != 0) {
return true;
}
}
return false;
}
}
/*! \brief Total number of bins of one feature group */
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
/*! \brief Get the bin mapper of feature i */
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
/*! \brief Get the bin data of the group containing feature i */
inline const Bin* FeatureBin(int i) const {
const int group = feature2group_[i];
return feature_groups_[group]->bin_data_.get();
}
/*! \brief Get the bin data of one feature group */
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
/*! \brief Whether one feature group is stored sparsely */
inline bool FeatureGroupIsSparse(int group) const {
return feature_groups_[group]->is_sparse_;
}
/*! \brief Get an iterator over the bins of feature i */
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
/*! \brief Get an iterator over the bins of one feature group */
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
/*! \brief Convert a bin threshold of feature i back to a real value */
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
/*! \brief Create one OrderedBin per feature group, in parallel */
inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
ordered_bins->resize(num_groups_);
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_groups_; ++i) {
OMP_LOOP_EX_BEGIN();
ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
/*! \brief Set feature names; spaces in names are replaced with underscores */
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name: feature_names_){
if (feature_name.find(' ') != std::string::npos){
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName){
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
/*! \brief Per-column bin info strings; "none" for columns that map to no used feature */
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
void ResetConfig(const char* parameters);
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief Threshold for treating a feature as a sparse feature */
double sparse_threshold_;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
/*! \brief Token used to validate the binary file format */
static const char* binary_file_token;
/*! \brief Number of feature groups */
int num_groups_;
/*! \brief Mapper from inner feature index to original column index */
std::vector<int> real_feature_idx_;
/*! \brief Mapper from inner feature index to its feature group */
std::vector<int> feature2group_;
/*! \brief Mapper from inner feature index to its index inside the group */
std::vector<int> feature2subfeature_;
/*! \brief Cumulative bin boundaries of the feature groups */
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
/*! \brief Per-feature monotone constraint types */
std::vector<int8_t> monotone_types_;
/*! \brief Per-feature split penalties */
std::vector<double> feature_penalty_;
bool is_finish_load_;
int max_bin_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
};
} // namespace LightGBM
#endif // LightGBM_DATA_H_
|
GB_binop__land_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_uint32
// A.*B function (eWiseMult): GB_AemultB__land_uint32
// A*D function (colscale): GB_AxD__land_uint32
// D*A function (rowscale): GB_DxB__land_uint32
// C+=B function (dense accum): GB_Cdense_accumB__land_uint32
// C+=b function (dense accum): GB_Cdense_accumb__land_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_uint32
// C=scalar+B GB_bind1st__land_uint32
// C=scalar+B' GB_bind1st_tran__land_uint32
// C=A+scalar GB_bind2nd__land_uint32
// C=A'+scalar GB_bind2nd_tran__land_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT32 || GxB_NO_LAND_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B where all three matrices are dense; returns GrB_NO_VALUE so the
// caller falls back to the generic kernel when this operator/type pair is
// disabled at compile time (see GB_DISABLE above).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C += B: accumulate a sparse matrix into a dense matrix, using the
// precomputed ek_slice task partition (kfirst/klast/pstart per task).
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// NOTE(review): this file is auto-generated; the duplicated, unreachable
// "return (GrB_SUCCESS)" that used to follow the template include has been
// removed to match the sibling GB_Cdense_accumB routine — the same fix
// should also be applied to the generator template.
GrB_Info GB_Cdense_accumb__land_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D where D is diagonal: each column of A is combined with the
// corresponding diagonal entry of D via the LAND operator.
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B where D is diagonal: each row of B is combined with the
// corresponding diagonal entry of D via the LAND operator.
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__land_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseAdd C=A+B (or C<M>=A+B). The slice workspaces below may be
// allocated by the included template; GB_FREE_ALL releases them.
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_uint32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C=A.*B (or C<M>=A.*B). The slice workspaces below may be
// allocated by the included template; GB_FREE_ALL releases them.
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = land (x, Bx [p]) for every entry present in B, with the scalar x
// bound as the first argument of the operator.
GrB_Info GB_bind1st__land_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Bx = (uint32_t *) Bx_input ;
uint32_t x = (*((uint32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (GBB (Bb, p))
{
uint32_t b_val = Bx [p] ;
Cx [p] = ((x != 0) && (b_val != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = land (Ax [p], y) for every entry present in A, with the scalar y
// bound as the second argument of the operator.
GrB_Info GB_bind2nd__land_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (GBB (Ab, p))
{
uint32_t a_val = Ax [p] ;
Cx [p] = ((a_val != 0) && (y != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = op (x, A'): transpose A while applying the operator with the scalar
// x bound as the first argument (via GB_CAST_OP above).
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this routine
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = op (A', y): transpose A while applying the operator with the scalar
// y bound as the second argument (via GB_CAST_OP above).
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Matrix_add_mp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#define size 400
// Timed OpenMP benchmark: repeatedly adds a sub-block of two matrices and
// prints the elapsed wall-clock time.
int main()
{
// Only element [0][0] of M1/M2 gets the listed value; all remaining
// elements are zero-initialized (C aggregate-initialization rules).
long double M1[size][size]={1075.75},M2[size][size]={1594.97},M3[size][size];
// omp_get_wtime() returns double; the previous float variables lost
// precision in the timestamps and thus in the measured interval.
double startTime,endTime,execTime;
int i,j;
// NOTE(review): the unused shared variable omp_rank was removed -- every
// thread wrote to it without synchronization, which is a data race.
startTime = omp_get_wtime();
#pragma omp parallel private (i,j) shared (M1,M2,M3)
{
#pragma omp for
for(i=0;i<250;i++)
{
// NOTE(review): only a 250x250 sub-block of the 400x400 matrices is
// processed -- confirm this is the intended workload size.
for(j=0;j<250;j++)
{
// artificial repetition to make the timed region measurable
for(int k=0;k<100000;k++)
M3[i][j] = M1[i][j] + M2[i][j];
}
}
}
endTime = omp_get_wtime();
execTime = endTime-startTime;
printf("%f \n",execTime);
return(0);
}
blas_dh.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.8 $
***********************************************************************EHEADER*/
#include "_hypre_Euclid.h"
/* #include "blas_dh.h" */
#undef __FUNC__
#define __FUNC__ "matvec_euclid_seq"
/* Sparse matrix-vector product y = A*x for the single-MPI-rank case.
* The matrix is stored CSR-style: rp holds row pointers, cval column
* indices and aval the values; row i occupies [rp[i], rp[i+1]).
* Errors out when more than one MPI rank is active (np_dh > 1). */
void matvec_euclid_seq(HYPRE_Int n, HYPRE_Int *rp, HYPRE_Int *cval, double *aval, double *x, double *y)
{
START_FUNC_DH
HYPRE_Int i, j;
HYPRE_Int from, to, col;
double sum;
if (np_dh > 1) SET_V_ERROR("only for sequential case!\n");
#ifdef USING_OPENMP_DH
#pragma omp parallel private(j, col, sum, from, to) \
default(shared) \
firstprivate(n, rp, cval, aval, x, y)
#endif
{
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
for (i=0; i<n; ++i) {
/* dot product of row i with x */
sum = 0.0;
from = rp[i];
to = rp[i+1];
for (j=from; j<to; ++j) {
col = cval[j];
sum += (aval[j]*x[col]);
}
y[i] = sum;
}
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "Axpy"
/* daxpy-style update: y := alpha*x + y over n entries */
void Axpy(HYPRE_Int n, double alpha, double *x, double *y)
{
START_FUNC_DH
HYPRE_Int idx;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \
private(idx)
#endif
for (idx=0; idx<n; ++idx) {
y[idx] += alpha*x[idx];
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "CopyVec"
/* element-wise copy of n doubles: yOUT := xIN */
void CopyVec(HYPRE_Int n, double *xIN, double *yOUT)
{
START_FUNC_DH
HYPRE_Int k;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \
private(k)
#endif
for (k=0; k<n; ++k) {
yOUT[k] = xIN[k];
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "ScaleVec"
/* in-place scaling of n doubles: x := alpha*x */
void ScaleVec(HYPRE_Int n, double alpha, double *x)
{
START_FUNC_DH
HYPRE_Int k;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x) \
private(k)
#endif
for (k=0; k<n; ++k) {
x[k] = alpha * x[k];
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "InnerProd"
/* Returns the dot product x'*y of length-n vectors. On more than one MPI
* rank (np_dh > 1) the local partial sums are combined with an Allreduce
* over comm_dh, so every rank receives the global result. */
double InnerProd(HYPRE_Int n, double *x, double *y)
{
START_FUNC_DH
double result, local_result = 0.0;
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x, y) \
private(i) \
reduction(+:local_result)
#endif
for (i=0; i<n; ++i) {
local_result += x[i] * y[i];
}
if (np_dh > 1) {
hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_DOUBLE, hypre_MPI_SUM, comm_dh);
} else {
result = local_result;
}
END_FUNC_VAL(result)
}
#undef __FUNC__
#define __FUNC__ "Norm2"
/* Returns the Euclidean (2-) norm of the length-n vector x. On more than
* one MPI rank (np_dh > 1) the local sums of squares are combined with an
* Allreduce over comm_dh before taking the square root. */
double Norm2(HYPRE_Int n, double *x)
{
START_FUNC_DH
double result, local_result = 0.0;
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x) \
private(i) \
reduction(+:local_result)
#endif
for (i=0; i<n; ++i) {
local_result += (x[i]*x[i]);
}
if (np_dh > 1) {
hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_DOUBLE, hypre_MPI_SUM, comm_dh);
} else {
result = local_result;
}
result = sqrt(result);
END_FUNC_VAL(result)
}
|
parallel_radix_sort.h | // Copyright 2010, Takuya Akiba
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Takuya Akiba nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PARALLEL_RADIX_SORT_H_
#define PARALLEL_RADIX_SORT_H_
#ifdef _OPENMP
#include <omp.h>
#endif
#include <stdint.h>
#include <cstring>
#include <cassert>
#include <climits>
#include <algorithm>
#include <utility>
#include <iostream>
namespace parallel_radix_sort {
namespace utility {
// Return the number of threads that would be executed in parallel regions
// Return the number of threads that would be executed in parallel regions.
// Marked |inline| because this function is *defined* in a header: without
// inline, including this header from more than one translation unit causes
// a multiple-definition link error (ODR violation).
inline int GetMaxThreads() {
#ifdef _OPENMP
  return omp_get_max_threads();
#else
  return 1;  // no OpenMP: everything runs on a single thread
#endif
}
// Set the number of threads that would be executed in parallel regions
// Set the number of threads that would be executed in parallel regions.
// Marked |inline| because this function is *defined* in a header: without
// inline, including this header from more than one translation unit causes
// a multiple-definition link error (ODR violation).
inline void SetNumThreads(int num_threads) {
#ifdef _OPENMP
  omp_set_num_threads(num_threads);
#else
  if (num_threads != 1) {
    // A serial build cannot honor a request for more than one thread.
    assert(!"compile with -fopenmp");
  }
#endif
}
// Return the thread number, which lies in [0, the number of threads)
// Return the thread number, which lies in [0, the number of threads).
// Marked |inline| because this function is *defined* in a header: without
// inline, including this header from more than one translation unit causes
// a multiple-definition link error (ODR violation).
inline int GetThreadId() {
#ifdef _OPENMP
  return omp_get_thread_num();
#else
  return 0;  // serial build: there is only the main thread
#endif
}
} // namespace utility
namespace internal {
// Size of the software managed buffer
const size_t kOutBufferSize = 32;
// The algorithm is implemented in this internal class
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
class ParallelRadixSortInternal {
public:
ParallelRadixSortInternal();
~ParallelRadixSortInternal();
// Allocate working buffers for up to |max_elems| keys and |max_threads|
// threads (-1 = use the OpenMP default).
void Init(size_t max_elems, int max_threads);
// Sort |num_elems| keys in |data|. Returns a pointer to the sorted keys,
// which is either |data| itself or the internal temporary buffer,
// depending on the number of radix passes.
PlainType *Sort(PlainType *data, size_t num_elems, int num_threads,
ValueManager *value_manager);
// Convenience wrapper: Init + Sort + copy the result back into |data|.
static void InitAndSort(PlainType *data, size_t num_elems, int num_threads,
ValueManager *value_manager);
private:
size_t max_elems_; // capacity of |tmp_|
int max_threads_; // number of per-thread buffer sets allocated
UnsignedType *tmp_; // scratch array, same capacity as the input
size_t **histo_; // per-thread histograms / scatter offsets
UnsignedType ***out_buf_; // per-thread, per-bucket staging buffers
size_t **out_buf_n_; // fill counts of |out_buf_|
int num_threads_; // threads used by the current Sort call
size_t *pos_bgn_, *pos_end_; // per-thread input ranges [bgn, end)
ValueManager *value_manager_;
void DeleteAll();
UnsignedType *SortInternal(UnsignedType *data, size_t num_elems,
int num_threads, ValueManager *value_manager);
// Compute |pos_bgn_| and |pos_end_| (associated ranges for each threads)
void ComputeRanges(size_t num_elems);
// First step of each iteration of sorting
// Compute the histogram of |src| using bits in [b, b + Base)
void ComputeHistogram(int b, UnsignedType *src);
// Second step of each iteration of sorting
// Scatter elements of |src| to |dst| using the histogram
void Scatter(int b, UnsignedType *src, UnsignedType *dst);
};
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>::ParallelRadixSortInternal()
: max_elems_(0), max_threads_(0), tmp_(NULL), histo_(NULL), out_buf_(NULL), out_buf_n_(NULL), pos_bgn_(NULL), pos_end_(NULL) {
// Keys are sorted through their unsigned bit patterns, so the plain and
// unsigned representations must have the same width.
assert(sizeof(PlainType) == sizeof(UnsignedType));
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base> ::~ParallelRadixSortInternal() {
// Release every buffer allocated by Init(); safe when Init was never called.
DeleteAll();
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
void ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>
::DeleteAll() {
  // Release every buffer allocated by Init(). Safe to call on a
  // default-constructed instance: all pointers are NULL and the loop
  // bound |max_threads_| is zero.
  delete [] tmp_;
  tmp_ = NULL;

  for (int t = 0; t < max_threads_; ++t) {
    delete [] histo_[t];
  }
  delete [] histo_;
  histo_ = NULL;

  for (int t = 0; t < max_threads_; ++t) {
    for (size_t bucket = 0; bucket < 1 << Base; ++bucket) {
      delete [] out_buf_[t][bucket];
    }
    delete [] out_buf_[t];
    delete [] out_buf_n_[t];
  }
  delete [] out_buf_;
  delete [] out_buf_n_;
  out_buf_ = NULL;
  out_buf_n_ = NULL;

  delete [] pos_bgn_;
  delete [] pos_end_;
  pos_bgn_ = pos_end_ = NULL;

  max_elems_ = 0;
  max_threads_ = 0;
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
void ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>
::Init(size_t max_elems, int max_threads) {
  // Re-initialization is allowed: drop any previously allocated buffers.
  DeleteAll();

  max_elems_ = max_elems;
  if (max_threads == -1) {
    max_threads = utility::GetMaxThreads();  // OpenMP default
  }
  assert(max_threads >= 1);
  max_threads_ = max_threads;

  const size_t num_buckets = size_t(1) << Base;

  tmp_ = new UnsignedType[max_elems];
  histo_ = new size_t*[max_threads];
  out_buf_ = new UnsignedType**[max_threads];
  out_buf_n_ = new size_t*[max_threads];
  for (int t = 0; t < max_threads; ++t) {
    histo_[t] = new size_t[num_buckets];
    out_buf_[t] = new UnsignedType*[num_buckets];
    out_buf_n_[t] = new size_t[num_buckets];
    for (size_t bucket = 0; bucket < num_buckets; ++bucket) {
      out_buf_[t][bucket] = new UnsignedType[kOutBufferSize];
    }
  }

  pos_bgn_ = new size_t[max_threads];
  pos_end_ = new size_t[max_threads];
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
PlainType *ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>
::Sort(PlainType *data, size_t num_elems, int num_threads, ValueManager *value_manager) {
  // The radix passes operate on the raw unsigned bit patterns of the keys;
  // the result pointer is converted back to the caller's type.
  UnsignedType *keys = reinterpret_cast<UnsignedType*>(data);
  UnsignedType *sorted = SortInternal(keys, num_elems, num_threads, value_manager);
  return reinterpret_cast<PlainType*>(sorted);
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
void ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>
::InitAndSort(PlainType *data, size_t num_elems, int num_threads, ValueManager *value_manager) {
  // One-shot convenience: allocate, sort, and copy the result back if it
  // ended up in the sorter's temporary buffer.
  ParallelRadixSortInternal sorter;
  sorter.Init(num_elems, num_threads);
  const PlainType *res = sorter.Sort(data, num_elems, num_threads, value_manager);
  if (res != data) {
    std::copy(res, res + num_elems, data);
  }
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
UnsignedType *ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>
::SortInternal(UnsignedType *data, size_t num_elems, int num_threads, ValueManager *value_manager) {
  // LSD radix sort: one histogram+scatter pass per Base-bit digit, from the
  // least significant digit upward, ping-ponging between |data| and |tmp_|.
  //
  // NOTE: leftover debug std::cout sampling loops were removed. Besides
  // polluting stdout from a library header, they stepped by num_elems/20,
  // which is 0 for 0 < num_elems < 20 and therefore looped forever.
  assert(num_elems <= max_elems_);

  if (num_threads == -1) {
    num_threads = utility::GetMaxThreads();
  }
  assert(1 <= num_threads && num_threads <= max_threads_);
  utility::SetNumThreads(num_threads);
  assert(utility::GetMaxThreads() == num_threads);
  num_threads_ = num_threads;
  value_manager_ = value_manager;

  // Compute |pos_bgn_| and |pos_end_| (per-thread input ranges).
  ComputeRanges(num_elems);

  // Iterate from lower bits to higher bits.
  const int bits = CHAR_BIT * sizeof(UnsignedType);
  UnsignedType *src = data, *dst = tmp_;
  for (int b = 0; b < bits; b += Base) {
    ComputeHistogram(b, src);
    Scatter(b, src, dst);
    std::swap(src, dst);
    value_manager->Next();
  }
  // After an odd number of passes the result lives in |tmp_|; the caller
  // must use the returned pointer rather than assume |data|.
  return src;
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
void ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base>
::ComputeRanges(size_t num_elems) {
  // Split [0, num_elems) into |num_threads_| contiguous, nearly equal
  // chunks; thread i processes [pos_bgn_[i], pos_end_[i]).
  size_t begin = 0;
  for (int i = 0; i + 1 < num_threads_; ++i) {
    const size_t chunk = (num_elems - begin) / (num_threads_ - i);
    pos_bgn_[i] = begin;
    pos_end_[i] = begin + chunk;
    begin += chunk;
  }
  // The last thread absorbs any remainder.
  pos_bgn_[num_threads_ - 1] = begin;
  pos_end_[num_threads_ - 1] = num_elems;
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
void ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base> ::ComputeHistogram(int b, UnsignedType *src) {
// Compute local histogram
// Each thread counts the Base-bit digits (bits [b, b+Base)) of the encoded
// keys in its own range.
#ifdef _OPENMP
#pragma omp parallel
#endif
{
const int my_id = utility::GetThreadId();
const size_t my_bgn = pos_bgn_[my_id];
const size_t my_end = pos_end_[my_id];
size_t *my_histo = histo_[my_id];
memset(my_histo, 0, sizeof(size_t) * (1 << Base));
for (size_t i = my_bgn; i < my_end; ++i) {
const UnsignedType s = Encoder::encode(src[i]);
// Extract the Base-bit digit starting at bit |b|.
const UnsignedType t = (s >> b) & ((1 << Base) - 1);
++my_histo[t];
}
}
// Compute global histogram
// Convert per-thread counts into exclusive prefix sums: afterwards,
// histo_[j][i] is the index in the destination where thread j writes its
// first element of bucket i. Buckets are the outer loop and threads the
// inner loop, so lower buckets come first and, within a bucket, lower
// thread ids (i.e. earlier input positions) come first -- this keeps the
// scatter stable.
size_t s = 0;
for (size_t i = 0; i < 1 << Base; ++i) {
for (int j = 0; j < num_threads_; ++j) {
const size_t t = s + histo_[j][i];
histo_[j][i] = s;
s = t;
}
}
}
template<typename PlainType, typename UnsignedType, typename Encoder, typename ValueManager, int Base>
void ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base> ::Scatter(int b, UnsignedType *src, UnsignedType *dst) {
// Move each element of |src| to its bucket position in |dst|, using the
// offsets left in histo_ by ComputeHistogram. Writes are staged through
// small per-bucket buffers (software write combining) so that |dst| is
// touched in runs of kOutBufferSize elements per bucket.
#ifdef _OPENMP
#pragma omp parallel
#endif
{
const int my_id = utility::GetThreadId();
const size_t my_bgn = pos_bgn_[my_id];
const size_t my_end = pos_end_[my_id];
size_t *my_histo = histo_[my_id];
UnsignedType **my_buf = out_buf_[my_id];
size_t *my_buf_n = out_buf_n_[my_id];
memset(my_buf_n, 0, sizeof(size_t) * (1 << Base));
for (size_t i = my_bgn; i < my_end; ++i) {
const UnsignedType s = Encoder::encode(src[i]);
// Digit of the current pass selects the destination bucket.
const UnsignedType t = (s >> b) & ((1 << Base) - 1);
my_buf[t][my_buf_n[t]] = src[i];
// Let the value manager stage the associated value the same way.
value_manager_->Push(my_id, t, my_buf_n[t], i);
++my_buf_n[t];
if (my_buf_n[t] == kOutBufferSize) {
// Buffer full: drain it to the bucket's current offset in |dst|.
size_t p = my_histo[t];
for (size_t j = 0; j < kOutBufferSize; ++j) {
dst[p++] = my_buf[t][j];
}
value_manager_->Flush(my_id, t, kOutBufferSize, my_histo[t]);
my_histo[t] += kOutBufferSize;
my_buf_n[t] = 0;
}
}
// Flush everything
// Drain the partially filled buffers of every bucket.
for (size_t i = 0; i < 1 << Base; ++i) {
size_t p = my_histo[i];
for (size_t j = 0; j < my_buf_n[i]; ++j) {
dst[p++] = my_buf[i][j];
}
value_manager_->Flush(my_id, i, my_buf_n[i], my_histo[i]);
}
}
}
} // namespace internal
// Encoders encode signed/unsigned integers and floating point numbers
// to correctly ordered unsigned integers
namespace encoder {
// Unsigned keys already sort correctly by their raw bit patterns.
class EncoderUnsigned {
 public:
  template<typename UnsignedType>
  inline static UnsignedType encode(UnsignedType x) {
    return x;  // identity mapping
  }
};
// Signed keys (viewed as unsigned bit patterns): flipping the sign bit maps
// the two's-complement ordering onto the unsigned ordering, e.g. for 32 bits
// INT_MIN -> 0x00000000 and INT_MAX -> 0xffffffff.
class EncoderSigned {
 public:
  template<typename UnsignedType>
  inline static UnsignedType encode(UnsignedType x) {
    const UnsignedType sign_bit =
        UnsignedType(1) << (CHAR_BIT * sizeof(UnsignedType) - 1);
    return x ^ sign_bit;
  }
};
// IEEE-754 keys viewed as unsigned bit patterns: values with the sign bit
// clear get the sign bit set, values with the sign bit set are bitwise
// complemented. The resulting unsigned integers compare in the same order
// as the original floating point values.
class EncoderDecimal {
 public:
  template<typename UnsignedType>
  inline static UnsignedType encode(UnsignedType x) {
    static const int bits = CHAR_BIT * sizeof(UnsignedType);
    const UnsignedType sign = x >> (bits - 1);  // 0 or 1
    // sign == 0: mask = 10...0 (flip sign bit only)
    // sign == 1: mask = 11...1 (complement everything)
    const UnsignedType mask = (-sign) | (UnsignedType(1) << (bits - 1));
    return x ^ mask;
  }
};
} // namespace encoder
// Value managers are used to generalize the sorting algorithm
// to sorting of keys and sorting of pairs
namespace value_manager {
// No-op value manager used when only keys are sorted: every hook is an
// empty inline function. Parameters are unnamed (instead of carrying GCC's
// __attribute__((unused))) to suppress unused-parameter warnings portably.
class DummyValueManager {
 public:
  inline void Push(int /*thread*/, size_t /*bucket*/,
                   size_t /*num*/, size_t /*from_pos*/) {}
  inline void Flush(int /*thread*/, size_t /*bucket*/,
                    size_t /*num*/, size_t /*to_pos*/) {}
  void Next() {}
};
template<typename ValueType, int Base> class PairValueManager {
public:
PairValueManager()
: max_elems_(0), max_threads_(0), original_(NULL), tmp_(NULL),
src_(NULL), dst_(NULL), out_buf_(NULL) {}
~PairValueManager() {
DeleteAll();
}
// Allocate value-side scratch buffers (mirrors the key sorter's Init).
void Init(size_t max_elems, int max_threads);
// Bind the value array that travels alongside the keys; must be called
// before each sort so |src_|/|dst_| start from a known state.
void Start(ValueType *original, size_t num_elems, int num_threads) {
assert(num_elems <= max_elems_);
assert(num_threads <= max_threads_);
src_ = original_ = original;
dst_ = tmp_;
}
// Stage the value at |from_pos| in the per-thread bucket buffer, exactly
// mirroring the key the sorter just buffered.
inline void Push(int thread, size_t bucket, size_t num, size_t from_pos) {
out_buf_[thread][bucket][num] = src_[from_pos];
}
// Write |num| staged values of |bucket| to their final positions in |dst_|.
inline void Flush(int thread, size_t bucket, size_t num, size_t to_pos) {
for (size_t i = 0; i < num; ++i) {
dst_[to_pos++] = out_buf_[thread][bucket][i];
}
}
// Called after every radix pass; swaps the value buffers in lock-step with
// the key buffers.
void Next() {
std::swap(src_, dst_);
}
// Sorted values: either |original_| or |tmp_|, matching the key result.
ValueType *GetResult() {
return src_;
}
private:
size_t max_elems_; // capacity of |tmp_|
int max_threads_; // per-thread buffer sets allocated by Init
static const size_t kOutBufferSize = internal::kOutBufferSize;
ValueType *original_, *tmp_;
ValueType *src_, *dst_;
ValueType ***out_buf_; // per-thread, per-bucket staging buffers
void DeleteAll();
};
template<typename ValueType, int Base>
void PairValueManager<ValueType, Base>
::Init(size_t max_elems, int max_threads) {
  // Resolve the thread count before allocating per-thread buffers.
  if (max_threads == -1) {
    max_threads = utility::GetMaxThreads();
  }
  assert(max_threads >= 1);

  DeleteAll();  // allow re-initialization
  max_elems_ = max_elems;
  max_threads_ = max_threads;

  const size_t num_buckets = size_t(1) << Base;
  tmp_ = new ValueType[max_elems];
  out_buf_ = new ValueType**[max_threads];
  for (int t = 0; t < max_threads; ++t) {
    out_buf_[t] = new ValueType*[num_buckets];
    for (size_t bucket = 0; bucket < num_buckets; ++bucket) {
      out_buf_[t][bucket] = new ValueType[kOutBufferSize];
    }
  }
}
template<typename ValueType, int Base>
void PairValueManager<ValueType, Base>
::DeleteAll() {
  // Release all buffers; safe on a default-constructed instance because
  // |max_threads_| is zero and delete[] of NULL is a no-op.
  delete [] tmp_;
  tmp_ = NULL;

  for (int t = 0; t < max_threads_; ++t) {
    for (size_t bucket = 0; bucket < 1 << Base; ++bucket) {
      delete [] out_buf_[t][bucket];
    }
    delete [] out_buf_[t];
  }
  delete [] out_buf_;
  out_buf_ = NULL;

  max_elems_ = 0;
  max_threads_ = 0;
}
} // namespace value_manager
// Frontend class for sorting keys
template<typename PlainType, typename UnsignedType = PlainType, typename Encoder = encoder::EncoderUnsigned, int Base = 8>
class KeySort {
typedef value_manager::DummyValueManager DummyValueManager;
typedef internal::ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, DummyValueManager, Base> Internal;
public:
// In the following functions, when |max_threads| or |num_threads| is -1,
// the default value given by OpenMP would be used.
// Pre-allocate buffers for up to |max_elems| keys.
void Init(size_t max_elems, int max_threads = -1) {
internal_.Init(max_elems, max_threads);
}
// Notice that the pointer returned by this
// does not necessarily equal to |data|.
PlainType *Sort(PlainType *data, size_t num_elems, int num_threads = -1) {
return internal_.Sort(data, num_elems, num_threads, &dummy_value_manager_);
}
// One-shot variant: allocates, sorts, and copies the result back to |data|.
static void InitAndSort(PlainType *data, size_t num_elems, int num_threads = -1) {
DummyValueManager dvm;
Internal::InitAndSort(data, num_elems, num_threads, &dvm);
}
private:
Internal internal_;
DummyValueManager dummy_value_manager_;
};
// Frontend class for sorting pairs
template<typename PlainType, typename ValueType, typename UnsignedType = PlainType, typename Encoder = encoder::EncoderUnsigned, int Base = 8>
class PairSort {
typedef value_manager::PairValueManager<ValueType, Base> ValueManager;
typedef internal::ParallelRadixSortInternal<PlainType, UnsignedType, Encoder, ValueManager, Base> Internal;
public:
// In the following functions, when |max_threads| or |num_threads| is -1,
// the default value given by OpenMP would be used.
// Pre-allocate key and value buffers for up to |max_elems| pairs.
void Init(size_t max_elems, int max_threads = -1) {
internal_.Init(max_elems, max_threads);
value_manager_.Init(max_elems, max_threads);
}
// Notice that the pointers returned by this
// do not necessarily equal to |keys| and |vals|.
std::pair<PlainType*, ValueType*> Sort(PlainType *keys, ValueType *vals, size_t num_elems, int num_threads = -1) {
value_manager_.Start(vals, num_elems, num_threads);
PlainType *res_keys = internal_.Sort(keys, num_elems, num_threads, &value_manager_);
ValueType *res_vals = value_manager_.GetResult();
return std::make_pair(res_keys, res_vals);
}
// One-shot variant: sorts |keys| with |vals| attached and copies both
// results back into the caller's arrays.
static void InitAndSort(PlainType *keys, ValueType *vals,
size_t num_elems, int num_threads = -1) {
ValueManager vm;
vm.Init(num_elems, num_threads);
vm.Start(vals, num_elems, num_threads);
Internal::InitAndSort(keys, num_elems, num_threads, &vm);
ValueType *res_vals = vm.GetResult();
if (res_vals != vals) {
for (size_t i = 0; i < num_elems; ++i) {
vals[i] = res_vals[i];
}
}
}
private:
Internal internal_;
ValueManager value_manager_;
};
// Map each built-in key type onto its (unsigned representation, encoder)
// pair: signed integers flip the sign bit, floating point keys use the
// order-preserving IEEE-754 transform.
// NOTE: the original macro's last line ended with a stray '\' line
// continuation, which silently spliced the following comment line into the
// macro body; the trailing backslash has been removed.
#define TYPE_CASE(plain_type, unsigned_type, encoder_type) \
template<> class KeySort<plain_type> \
: public KeySort<plain_type, unsigned_type, \
encoder::Encoder ## encoder_type> {}; \
template<typename V> class PairSort<plain_type, V> \
: public PairSort<plain_type, V, unsigned_type, \
encoder::Encoder ## encoder_type> {};
// Signed integers
TYPE_CASE(char, unsigned char, Signed);
TYPE_CASE(short, unsigned short, Signed);
TYPE_CASE(int, unsigned int, Signed);
TYPE_CASE(long, unsigned long, Signed);
TYPE_CASE(long long, unsigned long long, Signed);
// |signed char| and |char| are treated as different types
TYPE_CASE(signed char, unsigned char, Signed);
// Floating point numbers
TYPE_CASE(float, uint32_t, Decimal);
TYPE_CASE(double, uint64_t, Decimal);
#undef TYPE_CASE
// Sort |num_elems| keys of |data| in place (ascending); |num_threads| == -1
// uses the OpenMP default.
template<typename KeyType>
void SortKeys(KeyType *data, size_t num_elems, int num_threads = -1) {
KeySort<KeyType>::InitAndSort(data, num_elems, num_threads);
}
// Sort |num_elems| (key, value) pairs in place by key; |vals| is permuted
// together with |keys|. |num_threads| == -1 uses the OpenMP default.
template<typename KeyType, typename ValueType>
void SortPairs(KeyType *keys, ValueType *vals, size_t num_elems, int num_threads = -1) {
PairSort<KeyType, ValueType>::InitAndSort(keys, vals, num_elems, num_threads);
}
}; // namespace parallel radix sort
#endif // PARALLEL_RADIX_SORT_H_
|
pzhetrf_aasen.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include <math.h>
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include "core_blas.h"
#define A(m, n) ((plasma_complex64_t*)plasma_tile_addr(A, (m), (n)))
#define T(m, n) ((plasma_complex64_t*)plasma_tile_addr(T, (m), (n)))
#define L(m, n) ((plasma_complex64_t*)plasma_tile_addr(A, (m), (n)-1))
#define U(m, n) ((plasma_complex64_t*)plasma_tile_addr(A, (m)-1, (n)))
#define W(j) ((plasma_complex64_t*)plasma_tile_addr(W, (j), 0)) // nb*nb used to compute T(k,k)
#define W2(j) ((plasma_complex64_t*)plasma_tile_addr(W2, (j), 0)) // mt*(nb*nb) to store H
#define W3(j) ((plasma_complex64_t*)plasma_tile_addr(W3, (j), 0)) // mt*(nb*nb) used to form T(k,k)
#define W4(j) ((plasma_complex64_t*)plasma_tile_addr(W4, (j), 0)) // wmt used to update L(:,k)
#define H(m, n) (uplo == PlasmaLower ? W2((m)) : W2((n)))
#define IPIV(i) (ipiv + (i)*(A.mb))
/***************************************************************************//**
* Parallel tile LDLt factorization.
* @see plasma_omp_zhetrf_aasen
* TODO: Use nested-parallelisation to remove synchronization points?
******************************************************************************/
void plasma_pzhetrf_aasen(plasma_enum_t uplo,
                          plasma_desc_t A, int *ipiv,
                          plasma_desc_t T,
                          plasma_desc_t W,
                          plasma_sequence_t *sequence,
                          plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    // Read parameters from the context.
    plasma_context_t *plasma = plasma_context_self();
    plasma_barrier_t *barrier = &plasma->barrier;
    int ib = plasma->ib;
    int max_panel_threads = plasma->max_panel_threads;
    int wmt = W.mt-(1+2*A.mt);

    // Create views for the workspaces (W holds, in order: one nb*nb tile,
    // then two mt-tile stacks W2/W3, then wmt tiles for W4).
    plasma_desc_t W2 = plasma_desc_view(W, A.mb, 0, A.mt*A.mb, A.nb);
    plasma_desc_t W3 = plasma_desc_view(W, (1+A.mt)*A.mb, 0, A.mt*A.mb, A.nb);
    plasma_desc_t W4 = plasma_desc_view(W, (1+2*A.mt)*A.mb, 0, wmt, A.nb);

    //==============
    // PlasmaLower
    //==============
    // NOTE: In old PLASMA, we used priority.
    if (uplo == PlasmaLower) {
        for (int k = 0; k < A.mt; k++) {
            int mvak = plasma_tile_mview(A, k);
            int ldak = plasma_tile_mmain(A, k);
            int ldtk = T.mb; //plasma_tile_mmain_band(T, k);

            // -- computing offdiagonals H(1:k-1, k) -- //
            for (int m=1; m < k; m++) {
                int mvam = plasma_tile_mview(A, m);
                int ldtm = T.mb; //plasma_tile_mmain_band(T, m);
                core_omp_zgemm(
                    PlasmaNoTrans, PlasmaConjTrans,
                    mvam, mvak, mvam,
                    1.0, T(m, m), ldtm,
                         L(k, m), ldak,
                    0.0, H(m, k), A.mb,
                    sequence, request);
                if (m > 1) {
                    core_omp_zgemm(
                        PlasmaNoTrans, PlasmaConjTrans,
                        mvam, mvak, A.mb,
                        1.0, T(m, m-1), ldtm,
                             L(k, m-1), ldak,
                        1.0, H(m, k), A.mb,
                        sequence, request);
                }
                int mvamp1 = plasma_tile_mview(A, m+1);
                int ldtmp1 = A.mb; //plasma_tile_mmain_band(T, m+1);
                core_omp_zgemm(
                    PlasmaConjTrans, PlasmaConjTrans,
                    mvam, mvak, mvamp1,
                    1.0, T(m+1, m), ldtmp1,
                         L(k, m+1), ldak,
                    1.0, H(m, k), A.mb,
                    sequence, request);
            }
            // ---- end of computing H(1:(k-1),k) -- //

            // -- computing diagonal T(k, k) -- //
            plasma_complex64_t beta;
            if (k > 1) {
                int num = k-1;
                for (int m = 1; m < k; m++) {
                    int mvam = plasma_tile_mview(A, m);
                    int id = (m-1) % num;
                    // NOTE(review): with num == k-1, every m in [1, k)
                    // satisfies m < num+1, so beta is always 0.0 here and
                    // each workspace tile W3(id) is written exactly once.
                    if (m < num+1)
                        beta = 0.0;
                    else
                        beta = 1.0;
                    core_omp_zgemm(
                        PlasmaNoTrans, PlasmaNoTrans,
                        mvak, mvak, mvam,
                        -1.0, L(k, m), ldak,
                              H(m, k), A.mb,
                        beta, W3(id), A.mb,
                        sequence, request);
                }
                // all-reduce W3 using a binary tree //
                // NOTE: Old PLASMA had an option to reduce in a set of tiles //
                int num_players = num; // number of players
                int num_rounds = ceil( log10((double)num_players)/log10(2.0) ); // height of tournament
                int base = 2; // intervals between brackets
                for (int round = 1; round <= num_rounds; round++) {
                    int num_brackets = num_players / 2; // number of brackets
                    for (int bracket = 0; bracket < num_brackets; bracket++) {
                        // first contender
                        int m1 = base*bracket;
                        // second contender
                        int m2 = m1+base/2;
                        core_omp_zgeadd(
                            PlasmaNoTrans, mvak, mvak,
                            1.0, W3(m2), A.mb,
                            1.0, W3(m1), A.mb,
                            sequence, request);
                    }
                    num_players = ceil( ((double)num_players)/2.0 );
                    base = 2*base;
                }
                // T(k,k) = A(k,k) + reduced update in W3(0).
                core_omp_zlacpy(
                    PlasmaLower, PlasmaNoTrans,
                    mvak, mvak,
                    A(k, k), ldak,
                    T(k, k), ldtk,
                    sequence, request);
                core_omp_zgeadd(
                    PlasmaNoTrans, mvak, mvak,
                    1.0, W3(0), A.mb,
                    1.0, T(k, k), ldtk,
                    sequence, request);
            }
            else { // k == 0 or 1
                core_omp_zlacpy(
                    PlasmaLower, PlasmaNoTrans,
                    mvak, mvak,
                    A(k, k), ldak,
                    T(k, k), ldtk,
                    sequence, request);
                // expanding to full matrix
                core_omp_zlacpy(
                    PlasmaLower, PlasmaConjTrans,
                    mvak, mvak,
                    T(k, k), ldtk,
                    T(k, k), ldtk,
                    sequence, request);
            }

            if (k > 0) {
                if (k > 1) {
                    core_omp_zgemm(
                        PlasmaNoTrans, PlasmaNoTrans,
                        mvak, A.mb, mvak,
                        1.0, L(k, k), ldak,
                             T(k, k-1), ldtk,
                        0.0, W(0), A.mb,
                        sequence, request);
                    core_omp_zgemm(
                        PlasmaNoTrans, PlasmaConjTrans,
                        mvak, mvak, A.mb,
                        -1.0, W(0), A.mb,
                              L(k, k-1), ldak,
                        1.0, T(k, k), ldtk,
                        sequence, request);
                }
                // - symmetrically solve with L(k,k) //
                core_omp_zhegst(
                    1, PlasmaLower, mvak,
                    T(k, k), ldtk,
                    L(k, k), ldak,
                    sequence, request);
                // expand to full matrix
                core_omp_zlacpy(
                    PlasmaLower, PlasmaConjTrans,
                    mvak, mvak,
                    T(k, k), ldtk,
                    T(k, k), ldtk,
                    sequence, request);
            }

            // computing H(k, k) //
            beta = 0.0;
            if (k > 1) {
                core_omp_zgemm(
                    PlasmaNoTrans, PlasmaConjTrans,
                    mvak, mvak, A.nb,
                    1.0, T(k, k-1), ldtk,
                         L(k, k-1), ldak,
                    0.0, H(k, k), A.mb,
                    sequence, request);
                beta = 1.0;
            }
            if (k+1 < A.nt) {
                int ldakp1 = plasma_tile_mmain(A, k+1);
                if (k > 0) {
                    core_omp_zgemm(
                        PlasmaNoTrans, PlasmaConjTrans,
                        mvak, mvak, mvak,
                        1.0, T(k, k), ldtk,
                             L(k, k), ldak,
                        beta, H(k, k), A.mb,
                        sequence, request);

                    // computing the (k+1)-th column of L //
                    // - update with the previous column
                    if (A.mt-k < plasma->max_threads && k > 0) {
                        int num = imin(k, wmt/(A.mt-k-1)); // workspace per row
                        for (int n = 1; n <= k; n++) {
                            int mvan = plasma_tile_mview(A, n);
                            for (int m = k+1; m < A.mt; m++) {
                                int mvam = plasma_tile_mview(A, m);
                                int ldam = plasma_tile_mmain(A, m);
                                int id = (m-k-1)*num+(n-1)%num;
                                if (n < num+1)
                                    beta = 0.0;
                                else
                                    beta = 1.0;
                                // NOTE(review): the original code issued this
                                // identical zgemm from both branches of
                                // "if (n < num+1 || n > k-num) ... else ...";
                                // the duplicated branch has been collapsed.
                                core_omp_zgemm(
                                    PlasmaNoTrans, PlasmaNoTrans,
                                    mvam, mvak, mvan,
                                    -1.0, L(m, n), ldam,
                                          H(n, k), A.mb,
                                    beta, W4(id), A.mb,
                                    sequence, request);
                            }
                        }
                        // accumulate within workspace using a binary tree
                        int num_players = num; // number of players
                        int num_rounds = ceil( log10((double)num_players)/log10(2.0) ); // height of tournament
                        int base = 2; // intervals between brackets
                        for (int round = 1; round <= num_rounds; round++) {
                            int num_brackets = num_players / 2; // number of brackets
                            for (int bracket = 0; bracket < num_brackets; bracket++) {
                                // first contender
                                int m1 = base*bracket;
                                // second contender
                                int m2 = m1+base/2;
                                for (int m = k+1; m < A.mt; m++) {
                                    int mvam = plasma_tile_mview(A, m);
                                    core_omp_zgeadd(
                                        PlasmaNoTrans, mvam, mvak,
                                        1.0, W4((m-k-1)*num+m2), A.mb,
                                        1.0, W4((m-k-1)*num+m1), A.mb,
                                        sequence, request);
                                }
                            }
                            num_players = ceil( ((double)num_players)/2.0 );
                            base = 2*base;
                        }
                        // accumulate into L(:,k+1)
                        for (int m = k+1; m < A.mt; m++) {
                            int mvam = plasma_tile_mview(A, m);
                            int ldam = plasma_tile_mmain(A, m);
                            core_omp_zgeadd(
                                PlasmaNoTrans, mvam, mvak,
                                1.0, W4((m-k-1)*num), A.mb,
                                1.0, L(m, k+1), ldam,
                                sequence, request);
                        }
                    }
                    else {
                        // Not enough spare threads/workspace: update L(:,k+1)
                        // directly, column by column.
                        for (int n = 1; n <= k; n++) {
                            int mvan = plasma_tile_mview(A, n);
                            for (int m = k+1; m < A.mt; m++) {
                                int mvam = plasma_tile_mview(A, m);
                                int ldam = plasma_tile_mmain(A, m);
                                core_omp_zgemm(
                                    PlasmaNoTrans, PlasmaNoTrans,
                                    mvam, mvak, mvan,
                                    -1.0, L(m, n), ldam,
                                          H(n, k), A.mb,
                                    1.0, L(m, k+1), ldam,
                                    sequence, request);
                            }
                        }
                    }
                } // end of if (k > 0)

                // ============================= //
                // ==     PLASMA LU panel     == //
                // ============================= //
                // -- compute LU of the panel -- //
                plasma_complex64_t *a00, *a20;
                a00 = L(k+1, k+1);
                a20 = L(A.mt-1, k+1);

                int mlkk = A.m - (k+1)*A.mb; // dimension
                int ma00k = (A.mt-(k+1)-1)*A.mb;
                int na00k = plasma_tile_nmain(A, k);
                int lda20 = plasma_tile_mmain(A, A.mt-1);

                int k1 = 1+(k+1)*A.nb;
                int k2 = imin(mlkk, mvak)+(k+1)*A.nb;
                #pragma omp taskwait // make sure all the tiles in the column are ready
                #pragma omp task depend(inout:a00[0:ma00k*na00k]) \
                                 depend(inout:a20[0:lda20*mvak]) \
                                 depend(out:ipiv[k1-1:k2]) /*\
                                 priority(1) */
                {
                    if (sequence->status == PlasmaSuccess) {
                        for (int rank = 0; rank < max_panel_threads; rank++) {
                            #pragma omp task // priority(1)
                            {
                                plasma_desc_t view =
                                    plasma_desc_view(A,
                                                     (k+1)*A.mb, k*A.nb,
                                                     mlkk, mvak);
                                int info = core_zgetrf(view, IPIV(k+1), ib,
                                                       rank, max_panel_threads,
                                                       barrier);
                                if (info != 0)
                                    plasma_request_fail(sequence, request, (k+1)*A.mb+info);
                            }
                        }
                    }
                    #pragma omp taskwait
                    // Convert the panel-local pivot indices to global rows.
                    for (int i = 0; i < imin(mlkk, mvak); i++) {
                        IPIV(k+1)[i] += (k+1)*A.mb;
                    }
                }
                //#pragma omp taskwait
                // ============================== //
                // ==  end of PLASMA LU panel  == //
                // ============================== //

                // -- apply pivoting to previous columns of L -- //
                for (int n = 1; n < k+1; n++)
                {
                    plasma_complex64_t *akk = NULL;
                    int mlkn = (A.mt-k-1)*A.mb;
                    int nlkn = plasma_tile_nmain(A, n-1);
                    akk = L(k+1, n);
                    #pragma omp task depend(in:ipiv[(k1-1):k2]) \
                                     depend(inout:akk[0:mlkn*nlkn])
                    {
                        if (sequence->status == PlasmaSuccess) {
                            plasma_desc_t view =
                                plasma_desc_view(A, 0, (n-1)*A.nb, A.m, nlkn);
                            core_zgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
                        }
                    }
                }

                // computing T(k+1, k) //
                int mvakp1 = plasma_tile_mview(A, k+1);
                int ldak_n = plasma_tile_nmain(A, k);
                int ldtkp1 = A.mb; //plasma_tile_mmain_band(T, k+1);
                // copy upper-triangular part of L(k+1,k+1) to T(k+1,k)
                // and then zero it out
                core_omp_zlacpy(
                    PlasmaUpper, PlasmaNoTrans,
                    mvakp1, mvak,
                    L(k+1, k+1), ldakp1,
                    T(k+1, k ), ldtkp1,
                    sequence, request);
                core_omp_zlaset(
                    PlasmaUpper,
                    ldakp1, ldak_n, 0, 0,
                    mvakp1, mvak,
                    0.0, 1.0,
                    L(k+1, k+1));

                if (k > 0) {
                    core_omp_ztrsm(
                        PlasmaRight, PlasmaLower,
                        PlasmaConjTrans, PlasmaUnit,
                        mvakp1, mvak,
                        1.0, L(k, k), ldak,
                             T(k+1, k), ldtkp1,
                        sequence, request);
                }
                // copy T(k+1, k) to T(k, k+1) for zgbtrf
                core_omp_zlacpy(
                    PlasmaGeneral, PlasmaConjTrans,
                    mvakp1, mvak,
                    T(k+1, k), ldtkp1,
                    T(k, k+1), ldtk,
                    sequence, request);

                // -- symmetrically apply pivoting to trailing A -- //
                plasma_complex64_t *akk = NULL;
                int mlkn = (A.mt-k-1)*A.mb;
                int nlkn = plasma_tile_nmain(A, k+1);
                akk = A(k+1, k+1);
                // TODO: calling core routine for now.
                #pragma omp task depend(in:ipiv[(k1-1):k2]) \
                                 depend(inout:akk[0:nlkn*mlkn])
                {
                    core_zheswp(PlasmaLower, A, k1, k2, ipiv, 1);
                }
                // synch the row-swap of previous column before the next update
                #pragma omp taskwait
            }
        }
    }
    //==============
    // PlasmaUpper
    //==============
    else {
        // TODO: Upper
    }
}
|
pdgbtrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzgbtrf.c, normal z -> d, Fri Sep 28 17:38:10 2018
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) ((double*)plasma_tile_addr(A, m, n))
/******************************************************************************/
// Parallel tile LU factorization of a band matrix with partial pivoting.
// Pivot indices are returned in |ipiv| and shifted to global row numbers at
// the end of each step k.
void plasma_pdgbtrf(plasma_desc_t A, int *ipiv,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
// Read parameters from the context.
plasma_context_t *plasma = plasma_context_self();
int ib = plasma->ib; // inner blocking of the panel factorization
int max_panel_threads = plasma->max_panel_threads;
// Right-looking factorization over the diagonal tiles.
for (int k = 0; k < imin(A.mt, A.nt); k++) {
// for band matrix, gm is a multiple of mb,
// and there is no a10 submatrix
int mvak = plasma_tile_mview(A, k);
int nvak = plasma_tile_nview(A, k);
int ldak = plasma_tile_mmain_band(A, k, k);
// panel
int *ipivk = NULL;
double *a00 = NULL;
// Panel height is bounded by the band: at most mvak + kl rows.
int mak = imin(A.m-k*A.mb, mvak+A.kl);
int size_a00 = (A.gm-k*A.mb) * plasma_tile_nmain(A, k);
int size_i = imin(mvak, nvak);
// Do not use more panel threads than there are tiles to work on.
int num_panel_threads = imin(max_panel_threads,
imin(imin(A.mt, A.nt)-k, A.klt));
ipivk = &ipiv[k*A.mb];
a00 = A(k, k);
#pragma omp task depend(inout:a00[0:size_a00]) \
depend(out:ipivk[0:size_i]) \
priority(1)
{
// Per-thread scratch used by the threaded panel kernel to agree on
// the pivot (candidate index and value per participating thread).
volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int));
if (max_idx == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile double *max_val =
(double*)malloc(num_panel_threads*sizeof(
double));
if (max_val == NULL)
plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);
volatile int info = 0;
plasma_barrier_t barrier;
plasma_barrier_init(&barrier);
// A failed allocation above marks the sequence as failed, so the
// factorization below is skipped in that case.
if (sequence->status == PlasmaSuccess) {
for (int rank = 0; rank < num_panel_threads; rank++) {
#pragma omp task shared(barrier) priority(1)
{
// create a view for panel as a "general" submatrix
plasma_desc_t view = plasma_desc_view(
A, (A.kut-1)*A.mb, k*A.nb, mak, nvak);
view.type = PlasmaGeneral;
plasma_core_dgetrf(view, &ipiv[k*A.mb], ib,
rank, num_panel_threads,
max_idx, max_val, &info,
&barrier);
if (info != 0)
plasma_request_fail(sequence, request, k*A.mb+info);
}
}
}
#pragma omp taskwait
free((void*)max_idx);
free((void*)max_val);
}
// update
// TODO: fills are not tracked, see the one in fork
// Only columns within the upper bandwidth (k+1 .. k+kut-1) are touched.
for (int n = k+1; n < imin(A.nt, k+A.kut); n++) {
double *a01 = NULL;
double *a11 = NULL;
int nvan = plasma_tile_nview(A, n);
int size_a01 = ldak*nvan;
int size_a11 = (A.gm-(k+1)*A.mb)*nvan;
a01 = A(k, n);
a11 = A(k+1, n);
#pragma omp task depend(in:a00[0:size_a00]) \
depend(inout:ipivk[0:size_i]) \
depend(inout:a01[0:size_a01]) \
depend(inout:a11[0:size_a11]) \
priority(n == k+1)
{
if (sequence->status == PlasmaSuccess) {
// geswp
// Apply the row interchanges of step k to the tiles right of
// the panel.
int k1 = k*A.mb+1;
int k2 = imin(k*A.mb+A.mb, A.m);
plasma_desc_t view =
plasma_desc_view(A,
(A.kut-1 + k-n)*A.mb, n*A.nb,
mak, nvan);
view.type = PlasmaGeneral;
plasma_core_dgeswp(
PlasmaRowwise, view, 1, k2-k1+1, &ipiv[k*A.mb], 1);
// trsm
// Triangular solve with the unit-lower factor of the panel.
plasma_core_dtrsm(PlasmaLeft, PlasmaLower,
PlasmaNoTrans, PlasmaUnit,
mvak, nvan,
1.0, A(k, k), ldak,
A(k, n), plasma_tile_mmain_band(A, k, n));
// gemm
// Rank-update of the tiles below row k, limited to the band.
for (int m = imax(k+1, n-A.kut); m < imin(k+A.klt, A.mt); m++) {
int mvam = plasma_tile_mview(A, m);
#pragma omp task priority(n == k+1)
{
plasma_core_dgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvam, nvan, A.nb,
-1.0, A(m, k), plasma_tile_mmain_band(A, m, k),
A(k, n), plasma_tile_mmain_band(A, k, n),
1.0, A(m, n), plasma_tile_mmain_band(A, m, n));
}
}
#pragma omp taskwait
}
}
}
// Shift this step's pivot indices from panel-local to global row
// numbers (a no-op for k == 0, hence the guard).
#pragma omp task depend(in:ipivk[0:size_i])
if (sequence->status == PlasmaSuccess) {
if (k > 0) {
for (int i = 0; i < imin(mak, nvak); i++) {
ipiv[k*A.mb+i] += k*A.mb;
}
}
}
}
}
|
bt.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - BT
This benchmark is an OpenMP C version of the NPB BT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: R. Van der Wijngaart
T. Harris
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
/* global variables */
#include "header.h"
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
/*--------------------------------------------------------------------
program BT
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {
  /* Benchmark driver: read run parameters, warm up the code paths,
     time niter ADI steps, verify the result and print the report. */
  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;
  double tmax;
  boolean verified;
  char cclass;
  FILE *fp;
  int ch, nread;
/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - BT Benchmark\n\n");
  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputbt.data");
    /* Count successful conversions: a truncated or malformed file must
       not leave niter/dt/grid_points uninitialized (that would be UB). */
    nread = 0;
    nread += fscanf(fp, "%d", &niter);
    /* skip the rest of the line, but stop at EOF (the original
       "while (fgetc(fp) != '\n');" loops forever on a truncated file) */
    while ((ch = fgetc(fp)) != '\n' && ch != EOF);
    nread += fscanf(fp, "%lg", &dt);
    while ((ch = fgetc(fp)) != '\n' && ch != EOF);
    nread += fscanf(fp, "%d%d%d",
                    &grid_points[0], &grid_points[1], &grid_points[2]);
    fclose(fp);
    if (nread != 5) {
      printf("\n Malformed inputbt.data. Using compiled defaults\n");
      niter = NITER_DEFAULT;
      dt = DT_DEFAULT;
      grid_points[0] = PROBLEM_SIZE;
      grid_points[1] = PROBLEM_SIZE;
      grid_points[2] = PROBLEM_SIZE;
    }
  } else {
    printf(" No input file inputbt.data. Using compiled defaults\n");
    niter = NITER_DEFAULT;
    dt = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }
  printf(" Size: %3dx%3dx%3d\n",
grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);
  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }
  set_constants();
#pragma omp parallel
  {
    initialize();
    lhsinit();
    exact_rhs();
/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
    adi();
    initialize();
  } /* end parallel */
  timer_clear(1);
  timer_start(1);
#pragma omp parallel firstprivate(niter) private(step)
  {
    for (step = 1; step <= niter; step++) {
      if (step%20 == 0 || step == 1) {
#pragma omp master
        printf(" Time step %4d\n", step);
      }
      adi();
    }
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */
  timer_stop(1);
  tmax = timer_read(1);
  verify(niter, &cclass, &verified);
  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  /* guard against a (near-)zero elapsed time before computing MFLOPS */
  if ( fabs(tmax-0.0)>1.0e-5 ) {
    mflops = 1.0e-6*(double)niter*
(3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", cclass, grid_points[0],
grid_points[1], grid_points[2], niter, nthreads,
tmax, mflops, " floating point",
verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
CS6, "(none)");
  return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void add(void) {
  /* Apply the computed update: u <- u + rhs at every interior
     grid point, for all five solution components. */
  int i, j, k, m;
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] += rhs[i][j][k][m];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
  /* One ADI iteration: build the right hand side, then perform the
     block-tridiagonal solve along each coordinate direction in turn,
     and finally accumulate the update into u. */
  compute_rhs();
  x_solve();   /* xi-direction sweep   */
  y_solve();   /* eta-direction sweep  */
  z_solve();   /* zeta-direction sweep */
  add();       /* u += rhs             */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {
  /* RMS difference between the computed field u and the analytic
     solution, one value per component, written into rms[0..4]. */
  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], diff;
  for (m = 0; m < 5; m++) rms[m] = 0.0;
  /* accumulate squared pointwise errors over the whole grid */
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, u_exact);
        for (m = 0; m < 5; m++) {
          diff = u[i][j][k][m] - u_exact[m];
          rms[m] += diff * diff;
        }
      }
    }
  }
  /* normalize by the interior extent of each direction, then sqrt */
  for (m = 0; m < 5; m++) {
    for (d = 0; d < 3; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {
  /* RMS norm of the residual array rhs over all interior points,
     one value per solution component, written into rms[0..4]. */
  int i, j, k, d, m;
  double term;
  for (m = 0; m < 5; m++) rms[m] = 0.0;
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          term = rhs[i][j][k][m];
          rms[m] += term * term;
        }
      }
    }
  }
  /* normalize by the interior extent of each direction, then sqrt */
  for (m = 0; m < 5; m++) {
    for (d = 0; d < 3; d++) {
      rms[m] /= (double)(grid_points[d]-2);
    }
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c-------------------------------------------------------------------*/
/* Fills forcing[][][][] with the source term obtained by applying the
   discrete flux-difference and fourth-order dissipation stencils to
   the analytic solution (exact_solution), one coordinate direction at
   a time, then flipping the sign at the end. */
double dtemp[5], xi, eta, zeta, dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
/* NOTE(review): ue, buf, cuf and q are file-scope scratch arrays that
   hold one pencil of data at a time; under the worksharing loops below
   they are presumably threadprivate -- confirm in header.h. */
/*--------------------------------------------------------------------
c initialize
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = 0.0;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for private(k,i,m)
for (j = 1; j < grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 1; k < grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
/* evaluate the exact solution along the whole i-pencil:
   ue = exact state, buf[1..4] = ue[1..4]/ue[0],
   buf[0] = |buf[1..3]|^2, cuf = buf[dir]^2, q = kinetic-energy term */
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m <= 4; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
buf[i][3] * buf[i][3];
q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
buf[i][3]*ue[i][3]);
}
/* central flux differences over the interior of the pencil */
for (i = 1; i < grid_points[0]-1; i++) {
im1 = i-1;
ip1 = i+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
tx2*(ue[ip1][1]-ue[im1][1])+
dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
(ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
/* one-sided stencils near the i=0 boundary */
for (m = 0; m < 5; m++) {
i = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
i = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[i-1][m] + 6.0*ue[i][m] -
4.0*ue[i+1][m] + ue[i+2][m]);
}
/* full five-point dissipation stencil in the interior */
for (m = 0; m < 5; m++) {
for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[i-2][m] - 4.0*ue[i-1][m] +
6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
}
}
/* one-sided stencils near the i=grid_points[0]-1 boundary */
for (m = 0; m < 5; m++) {
i = grid_points[0]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[i-2][m] - 4.0*ue[i-1][m] +
6.0*ue[i][m] - 4.0*ue[i+1][m]);
i = grid_points[0]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
c-------------------------------------------------------------------*/
/* same structure as the xi sweep, but the pencil runs over j and the
   transport velocity is buf[..][2] */
#pragma omp for private(k,j,m)
for (i = 1; i < grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (k = 1; k < grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m <= 4; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
buf[j][3] * buf[j][3];
q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
buf[j][3]*ue[j][3]);
}
for (j = 1; j < grid_points[1]-1; j++) {
jm1 = j-1;
jp1 = j+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
ty2*( ue[jp1][2]-ue[jm1][2] )+
dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
(ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
buf[jm1][0])+
yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
j = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
j = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[j-1][m] + 6.0*ue[j][m] -
4.0*ue[j+1][m] + ue[j+2][m]);
}
for (m = 0; m < 5; m++) {
for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[j-2][m] - 4.0*ue[j-1][m] +
6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
}
}
for (m = 0; m < 5; m++) {
j = grid_points[1]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[j-2][m] - 4.0*ue[j-1][m] +
6.0*ue[j][m] - 4.0*ue[j+1][m]);
j = grid_points[1]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
c-------------------------------------------------------------------*/
/* same structure again; the pencil runs over k and the transport
   velocity is buf[..][3] */
#pragma omp for private(j,k,m)
for (i = 1; i < grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (j = 1; j < grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m <= 4; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
buf[k][2] * buf[k][2];
q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
buf[k][3]*ue[k][3]);
}
for (k = 1; k < grid_points[2]-1; k++) {
km1 = k-1;
kp1 = k+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
tz2*( ue[kp1][3]-ue[km1][3] )+
dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
(ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
+buf[km1][0])+
zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
k = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
k = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[k-1][m] + 6.0*ue[k][m] -
4.0*ue[k+1][m] + ue[k+2][m]);
}
for (m = 0; m < 5; m++) {
for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[k-2][m] - 4.0*ue[k-1][m] +
6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
}
}
for (m = 0; m < 5; m++) {
k = grid_points[2]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[k-2][m] - 4.0*ue[k-1][m] +
6.0*ue[k][m] - 4.0*ue[k+1][m]);
k = grid_points[2]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
}
}
}
/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]) {
  /* Evaluate the analytic solution at point (xi, eta, zeta).
     Each component m is ce[m][0] plus one cubic polynomial per
     coordinate (Horner form), with coefficients from the global
     table ce. Results are written to dtemp[0..4]. */
  int m;
  double px, py, pz;
  for (m = 0; m < 5; m++) {
    px = ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]));
    py = ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11]));
    pz = ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]));
    dtemp[m] = ce[m][0] + xi*px + eta*py + zeta*pz;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < IMAX; i++) {
for (j = 0; j < IMAX; j++) {
for (k = 0; k < IMAX; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = 1.0;
}
}
}
}
/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,ix,iy,iz,m)
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
for (ix = 0; ix < 2; ix++) {
exact_solution((double)ix, eta, zeta,
&(Pface[ix][0][0]));
}
for (iy = 0; iy < 2; iy++) {
exact_solution(xi, (double)iy , zeta,
&Pface[iy][1][0]);
}
for (iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double)iz,
&Pface[iz][2][0]);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] +
(1.0-xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] +
(1.0-eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] +
(1.0-zeta) * Pface[0][2][m];
u[i][j][k][m] = Pxi + Peta + Pzeta -
Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
Pxi*Peta*Pzeta;
}
}
}
}
/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
i = 0;
xi = 0.0;
#pragma omp for private(k,m) nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
i = grid_points[0]-1;
xi = 1.0;
#pragma omp for private(k,m)
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
j = 0;
eta = 0.0;
#pragma omp for private(k,m) nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
j = grid_points[1]-1;
eta = 1.0;
#pragma omp for private(k,m)
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
k = 0;
zeta = 0.0;
#pragma omp for private(j,m) nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i *dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
k = grid_points[2]-1;
zeta = 1.0;
#pragma omp for private(j,m)
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
  /* Reset the block-tridiagonal operator: clear all three 5x5 blocks
     of lhs at every grid point, then write ones on the diagonal of
     the centre block, so rows never touched by lhsx/lhsy/lhsz hold
     the identity. */
  int i, j, k, m, n;
/*--------------------------------------------------------------------
c     zero the whole left hand side for starters
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m,n)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (n = 0; n < 5; n++) {
          for (m = 0; m < 5; m++) {
            lhs[i][j][k][0][n][m] = 0.0;
            lhs[i][j][k][1][n][m] = 0.0;
            lhs[i][j][k][2][n][m] = 0.0;
          }
        }
      }
    }
  }
/*--------------------------------------------------------------------
c     next, set all diagonal values to 1. This is overkill, but
c     convenient
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
        for (m = 0; m < 5; m++) {
          lhs[i][j][k][1][m][m] = 1.0;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/
int i, j, k;
/* NOTE(review): tmp1/tmp2/tmp3 are file-scope temporaries shared with
   the other lhs* routines; under the "#pragma omp for" below they are
   presumably threadprivate -- confirm in header.h. */
/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
#pragma omp for private(k,i)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (i = 0; i < grid_points[0]; i++) {
/* tmp1 = 1/u0, tmp2 = 1/u0^2, tmp3 = 1/u0^3 at this grid point */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
/* fjac: 5x5 Jacobian of the xi-direction flux w.r.t. u */
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 1.0;
fjac[ i][ j][ k][0][2] = 0.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
u[i][j][k][1])
+ c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
fjac[i][j][k][1][1] = ( 2.0 - c2 )
* ( u[i][j][k][1] / u[i][j][k][0] );
fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][1][4] = c2;
fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][2][3] = 0.0;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][2] = 0.0;
fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][1] * tmp1 );
fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( 3.0*u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )
* tmp2;
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )
* tmp2;
fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );
/* njac: 5x5 Jacobian of the viscous/dissipative terms */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction
c-------------------------------------------------------------------*/
/* Block tridiagonal rows: AA couples to i-1 (built from -dt*tx2*fjac
   and -dt*tx1*njac at i-1), BB is the diagonal block (identity plus
   2*dt*tx1 times njac at i and the dx* terms), CC couples to i+1.
   Here tmp1 = dt*tx1 and tmp2 = dt*tx2. */
for (i = 1; i < grid_points[0]-1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
- tmp1 * njac[i-1][j][k][0][0]
- tmp1 * dx1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
- tmp1 * njac[i-1][j][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
- tmp1 * njac[i-1][j][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
- tmp1 * njac[i-1][j][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
- tmp1 * njac[i-1][j][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
- tmp1 * njac[i-1][j][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
- tmp1 * njac[i-1][j][k][1][1]
- tmp1 * dx2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
- tmp1 * njac[i-1][j][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
- tmp1 * njac[i-1][j][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
- tmp1 * njac[i-1][j][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
- tmp1 * njac[i-1][j][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
- tmp1 * njac[i-1][j][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
- tmp1 * njac[i-1][j][k][2][2]
- tmp1 * dx3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
- tmp1 * njac[i-1][j][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
- tmp1 * njac[i-1][j][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
- tmp1 * njac[i-1][j][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
- tmp1 * njac[i-1][j][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
- tmp1 * njac[i-1][j][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
- tmp1 * njac[i-1][j][k][3][3]
- tmp1 * dx4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
- tmp1 * njac[i-1][j][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
- tmp1 * njac[i-1][j][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
- tmp1 * njac[i-1][j][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
- tmp1 * njac[i-1][j][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
- tmp1 * njac[i-1][j][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
- tmp1 * njac[i-1][j][k][4][4]
- tmp1 * dx5;
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dx1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dx2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dx3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dx4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dx5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
- tmp1 * njac[i+1][j][k][0][0]
- tmp1 * dx1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
- tmp1 * njac[i+1][j][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
- tmp1 * njac[i+1][j][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
- tmp1 * njac[i+1][j][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
- tmp1 * njac[i+1][j][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
- tmp1 * njac[i+1][j][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
- tmp1 * njac[i+1][j][k][1][1]
- tmp1 * dx2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
- tmp1 * njac[i+1][j][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
- tmp1 * njac[i+1][j][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
- tmp1 * njac[i+1][j][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
- tmp1 * njac[i+1][j][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
- tmp1 * njac[i+1][j][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
- tmp1 * njac[i+1][j][k][2][2]
- tmp1 * dx3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
- tmp1 * njac[i+1][j][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
- tmp1 * njac[i+1][j][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
- tmp1 * njac[i+1][j][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
- tmp1 * njac[i+1][j][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
- tmp1 * njac[i+1][j][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
- tmp1 * njac[i+1][j][k][3][3]
- tmp1 * dx4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
- tmp1 * njac[i+1][j][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
- tmp1 * njac[i+1][j][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
- tmp1 * njac[i+1][j][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
- tmp1 * njac[i+1][j][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
- tmp1 * njac[i+1][j][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
- tmp1 * njac[i+1][j][k][4][4]
- tmp1 * dx5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
/* Builds, for every interior cell, the block-tridiagonal system in the
c eta (y) direction: lhs[..][AA] (sub-diagonal), lhs[..][BB] (diagonal)
c and lhs[..][CC] (super-diagonal) 5x5 blocks, assembled from the flux
c Jacobian fjac and the viscous Jacobian njac of the neighboring cells.
c
c NOTE(review): tmp1, tmp2, tmp3 are not declared locally, so they
c appear to be file-scope scratch shared by all threads; unless the
c enclosing parallel region privatizes them (e.g. threadprivate) the
c worksharing loops below race on them -- confirm. */
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
/* First loop: evaluate the 5x5 flux (fjac) and viscous (njac)
c Jacobians at every cell. j runs over the FULL range
c 0..grid_points[1]-1 because the second loop reads j-1 and j+1. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 0.0;
fjac[ i][ j][ k][0][2] = 1.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][3] = 0.0;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
fjac[i][j][k][2][4] = c2;
fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][3][1] = 0.0;
fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * u[i][j][k][4] * tmp1 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2]
* tmp2;
fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
/* the y-momentum row carries the con43 (=4/3) factor in the
c y-direction viscous Jacobian */
njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1/tmp2 are reused here with a different meaning:
c dt-scaled viscous (ty1) and convective (ty2) weights */
tmp1 = dt * ty1;
tmp2 = dt * ty2;
/* AA: sub-diagonal block, coupling to cell j-1;
c diagonal entries get an extra -tmp1*dy* dissipation term */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
- tmp1 * njac[i][j-1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
- tmp1 * njac[i][j-1][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
- tmp1 * njac[i][j-1][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
- tmp1 * njac[i][j-1][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
- tmp1 * njac[i][j-1][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
- tmp1 * njac[i][j-1][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
- tmp1 * njac[i][j-1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
- tmp1 * njac[i][j-1][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
- tmp1 * njac[i][j-1][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
- tmp1 * njac[i][j-1][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
- tmp1 * njac[i][j-1][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
- tmp1 * njac[i][j-1][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
- tmp1 * njac[i][j-1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
- tmp1 * njac[i][j-1][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
- tmp1 * njac[i][j-1][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
- tmp1 * njac[i][j-1][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
- tmp1 * njac[i][j-1][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
- tmp1 * njac[i][j-1][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
- tmp1 * njac[i][j-1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
- tmp1 * njac[i][j-1][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
- tmp1 * njac[i][j-1][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
- tmp1 * njac[i][j-1][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
- tmp1 * njac[i][j-1][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
- tmp1 * njac[i][j-1][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
- tmp1 * njac[i][j-1][k][4][4]
- tmp1 * dy5;
/* BB: diagonal block: identity + 2*tmp1*(njac at j + dy*) */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dy1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dy2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dy3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dy4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dy5;
/* CC: super-diagonal block, coupling to cell j+1 (note the
c convective term enters with opposite sign to AA) */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
- tmp1 * njac[i][j+1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
- tmp1 * njac[i][j+1][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
- tmp1 * njac[i][j+1][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
- tmp1 * njac[i][j+1][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
- tmp1 * njac[i][j+1][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
- tmp1 * njac[i][j+1][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
- tmp1 * njac[i][j+1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
- tmp1 * njac[i][j+1][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
- tmp1 * njac[i][j+1][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
- tmp1 * njac[i][j+1][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
- tmp1 * njac[i][j+1][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
- tmp1 * njac[i][j+1][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
- tmp1 * njac[i][j+1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
- tmp1 * njac[i][j+1][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
- tmp1 * njac[i][j+1][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
- tmp1 * njac[i][j+1][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
- tmp1 * njac[i][j+1][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
- tmp1 * njac[i][j+1][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
- tmp1 * njac[i][j+1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]
- tmp1 * njac[i][j+1][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
- tmp1 * njac[i][j+1][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
- tmp1 * njac[i][j+1][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
- tmp1 * njac[i][j+1][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
- tmp1 * njac[i][j+1][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
- tmp1 * njac[i][j+1][k][4][4]
- tmp1 * dy5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/
/* Builds, for every interior cell, the block-tridiagonal system in the
c zeta (z) direction: lhs[..][AA] (sub-diagonal), lhs[..][BB]
c (diagonal) and lhs[..][CC] (super-diagonal) 5x5 blocks, assembled
c from the flux Jacobian fjac and the viscous Jacobian njac of the
c neighboring cells. Mirrors lhsy with j<->k roles swapped and the
c con43 factor moved to the z-momentum row.
c
c Fix: njac[3][3] previously used "con43 * c3 * c4"; replaced with
c "con43 * c3c4" for consistency with every other njac term and with
c lhsy (c3c4 == c3 * c4, so the value is unchanged).
c
c NOTE(review): tmp1, tmp2, tmp3 are not declared locally, so they
c appear to be file-scope scratch shared by all threads; unless the
c enclosing parallel region privatizes them (e.g. threadprivate) the
c worksharing loops below race on them -- confirm. */
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
/* First loop: evaluate the 5x5 flux (fjac) and viscous (njac)
c Jacobians at every cell. k runs over the FULL range
c 0..grid_points[2]-1 because the second loop reads k-1 and k+1. */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 0; k < grid_points[2]; k++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = ( 2.0 - c2 )
* u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ 3.0*u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
/* the z-momentum row carries the con43 (=4/3) factor in the
c z-direction viscous Jacobian */
njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = con43 * c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1/tmp2 are reused here with a different meaning:
c dt-scaled viscous (tz1) and convective (tz2) weights */
tmp1 = dt * tz1;
tmp2 = dt * tz2;
/* AA: sub-diagonal block, coupling to cell k-1;
c diagonal entries get an extra -tmp1*dz* dissipation term */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
- tmp1 * njac[i][j][k-1][0][0]
- tmp1 * dz1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
- tmp1 * njac[i][j][k-1][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
- tmp1 * njac[i][j][k-1][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
- tmp1 * njac[i][j][k-1][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
- tmp1 * njac[i][j][k-1][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
- tmp1 * njac[i][j][k-1][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
- tmp1 * njac[i][j][k-1][1][1]
- tmp1 * dz2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
- tmp1 * njac[i][j][k-1][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
- tmp1 * njac[i][j][k-1][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
- tmp1 * njac[i][j][k-1][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
- tmp1 * njac[i][j][k-1][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
- tmp1 * njac[i][j][k-1][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
- tmp1 * njac[i][j][k-1][2][2]
- tmp1 * dz3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
- tmp1 * njac[i][j][k-1][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
- tmp1 * njac[i][j][k-1][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
- tmp1 * njac[i][j][k-1][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
- tmp1 * njac[i][j][k-1][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
- tmp1 * njac[i][j][k-1][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
- tmp1 * njac[i][j][k-1][3][3]
- tmp1 * dz4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
- tmp1 * njac[i][j][k-1][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
- tmp1 * njac[i][j][k-1][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
- tmp1 * njac[i][j][k-1][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
- tmp1 * njac[i][j][k-1][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
- tmp1 * njac[i][j][k-1][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
- tmp1 * njac[i][j][k-1][4][4]
- tmp1 * dz5;
/* BB: diagonal block: identity + 2*tmp1*(njac at k + dz*) */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dz5;
/* CC: super-diagonal block, coupling to cell k+1 (note the
c convective term enters with opposite sign to AA) */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
- tmp1 * njac[i][j][k+1][0][0]
- tmp1 * dz1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
- tmp1 * njac[i][j][k+1][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
- tmp1 * njac[i][j][k+1][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
- tmp1 * njac[i][j][k+1][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
- tmp1 * njac[i][j][k+1][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
- tmp1 * njac[i][j][k+1][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
- tmp1 * njac[i][j][k+1][1][1]
- tmp1 * dz2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
- tmp1 * njac[i][j][k+1][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
- tmp1 * njac[i][j][k+1][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
- tmp1 * njac[i][j][k+1][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
- tmp1 * njac[i][j][k+1][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
- tmp1 * njac[i][j][k+1][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
- tmp1 * njac[i][j][k+1][2][2]
- tmp1 * dz3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
- tmp1 * njac[i][j][k+1][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
- tmp1 * njac[i][j][k+1][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
- tmp1 * njac[i][j][k+1][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
- tmp1 * njac[i][j][k+1][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
- tmp1 * njac[i][j][k+1][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]
- tmp1 * njac[i][j][k+1][3][3]
- tmp1 * dz4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]
- tmp1 * njac[i][j][k+1][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]
- tmp1 * njac[i][j][k+1][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]
- tmp1 * njac[i][j][k+1][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]
- tmp1 * njac[i][j][k+1][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]
- tmp1 * njac[i][j][k+1][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]
- tmp1 * njac[i][j][k+1][4][4]
- tmp1 * dz5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/* Assembles the right-hand side of the BT system:
c   rhs = forcing + (central-difference fluxes in xi, eta, zeta)
c         - fourth-order artificial dissipation, then scaled by dt.
c Worksharing loops carry "nowait" where the next loop touches
c disjoint data; the barriers at the non-nowait loops order the
c flux sweeps against the dissipation sweeps. */
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
/* Precompute per-cell auxiliaries on the WHOLE grid (boundaries
c included) since the stencils below read i/j/k +- 1 neighbors:
c rho_i = 1/rho, us/vs/ws = velocities, square = rho*q, qs = q. */
#pragma omp for private(j,k) nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0/u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] +
u[i][j][k][2]*u[i][j][k][2] +
u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
/* Second-order central differences in i for each of the 5 equations:
c diffusion (dx*tx1), viscous cross terms (xxcon*), convection (tx2) */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
(u[i+1][j][k][0] - 2.0*u[i][j][k][0] +
u[i-1][j][k][0]) -
tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
(u[i+1][j][k][1] - 2.0*u[i][j][k][1] +
u[i-1][j][k][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[i+1][j][k][1]*up1 -
u[i-1][j][k][1]*um1 +
(u[i+1][j][k][4]- square[i+1][j][k]-
u[i-1][j][k][4]+ square[i-1][j][k])*
c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
(u[i+1][j][k][2] - 2.0*u[i][j][k][2] +
u[i-1][j][k][2]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[i+1][j][k][2]*up1 -
u[i-1][j][k][2]*um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
(u[i+1][j][k][3] - 2.0*u[i][j][k][3] +
u[i-1][j][k][3]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[i+1][j][k][3]*up1 -
u[i-1][j][k][3]*um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
(u[i+1][j][k][4] - 2.0*u[i][j][k][4] +
u[i-1][j][k][4]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i-1][j][k][4]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[i+1][j][k][4] -
c2*square[i+1][j][k])*up1 -
(c1*u[i-1][j][k][4] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
/* The 5-point dissipation stencil needs i-2..i+2, so the first two
c and last two interior planes use one-sided (reduced) stencils. */
i = 1;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m]);
}
}
}
i = 2;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
}
}
}
/* full 5-point stencil on the interior */
#pragma omp for private(j,k,m) nowait
for (i = 3; i < grid_points[0]-3; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m] );
}
}
}
}
i = grid_points[0]-3;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
}
}
}
i = grid_points[0]-2;
#pragma omp for private(k,m)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
/* Same structure as the xi sweep, differencing in j with ty2/yycon* */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
(u[i][j+1][k][0] - 2.0*u[i][j][k][0] +
u[i][j-1][k][0]) -
ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
(u[i][j+1][k][1] - 2.0*u[i][j][k][1] +
u[i][j-1][k][1]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[i][j+1][k][1]*vp1 -
u[i][j-1][k][1]*vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
(u[i][j+1][k][2] - 2.0*u[i][j][k][2] +
u[i][j-1][k][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[i][j+1][k][2]*vp1 -
u[i][j-1][k][2]*vm1 +
(u[i][j+1][k][4] - square[i][j+1][k] -
u[i][j-1][k][4] + square[i][j-1][k])
*c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
(u[i][j+1][k][3] - 2.0*u[i][j][k][3] +
u[i][j-1][k][3]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[i][j+1][k][3]*vp1 -
u[i][j-1][k][3]*vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
(u[i][j+1][k][4] - 2.0*u[i][j][k][4] +
u[i][j-1][k][4]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j-1][k][4]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[i][j+1][k][4] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[i][j-1][k][4] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c-------------------------------------------------------------------*/
/* one-sided stencils at j = 1, 2 and the last two interior planes */
j = 1;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m]);
}
}
}
j = 2;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 3; j < grid_points[1]-3; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m] );
}
}
}
}
j = grid_points[1]-3;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
}
}
}
j = grid_points[1]-2;
#pragma omp for private(k,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
5.*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
/* Same structure again, differencing in k with tz2/zzcon* */
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] +
u[i][j][k-1][0]) -
tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
(u[i][j][k+1][1] - 2.0*u[i][j][k][1] +
u[i][j][k-1][1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[i][j][k+1][1]*wp1 -
u[i][j][k-1][1]*wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
(u[i][j][k+1][2] - 2.0*u[i][j][k][2] +
u[i][j][k-1][2]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[i][j][k+1][2]*wp1 -
u[i][j][k-1][2]*wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
(u[i][j][k+1][3] - 2.0*u[i][j][k][3] +
u[i][j][k-1][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[i][j][k+1][3]*wp1 -
u[i][j][k-1][3]*wm1 +
(u[i][j][k+1][4] - square[i][j][k+1] -
u[i][j][k-1][4] + square[i][j][k-1])
*c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
(u[i][j][k+1][4] - 2.0*u[i][j][k][4] +
u[i][j][k-1][4]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j][k-1][4]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[i][j][k+1][4] -
c2*square[i][j][k+1])*wp1 -
(c1*u[i][j][k-1][4] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c-------------------------------------------------------------------*/
/* one-sided stencils at k = 1, 2 and the last two interior planes */
k = 1;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m]);
}
}
}
k = 2;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 3; k < grid_points[2]-3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m] );
}
}
}
}
k = grid_points[2]-3;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
}
}
}
k = grid_points[2]-2;
#pragma omp for private(j,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
5.0*u[i][j][k][m] );
}
}
}
/* finally scale the assembled residual by the time step */
#pragma omp for private(k,m,i)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0]-1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
c initialize problem constants: the exact-solution coefficient table
c ce, grid spacings, artificial-dissipation factors, and the many
c precomputed products of these that the solver kernels reuse.
c-------------------------------------------------------------------*/
  int i, j;
  /* coefficients of the exact solution; row = equation (0..4),
     column = term index (0..12) */
  static const double ce_init[5][13] = {
    { 2.0, 0.0, 0.0, 4.0, 5.0, 3.0, 0.5, 0.02, 0.01, 0.03, 0.5, 0.4, 0.3 },
    { 1.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 0.01, 0.03, 0.02, 0.4, 0.3, 0.5 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 0.04, 0.03, 0.05, 0.3, 0.5, 0.4 },
    { 2.0, 2.0, 0.0, 0.0, 0.0, 2.0, 3.0, 0.03, 0.05, 0.04, 0.2, 0.1, 0.3 },
    { 5.0, 4.0, 3.0, 2.0, 0.1, 0.4, 0.3, 0.05, 0.04, 0.03, 0.1, 0.3, 0.2 }
  };

  for (i = 0; i < 5; i++) {
    for (j = 0; j < 13; j++) {
      ce[i][j] = ce_init[i][j];
    }
  }

  /* basic physical constants */
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;

  /* inverse mesh widths (grid_points-1 intervals per direction) */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);

  /* frequently used products of the c-constants */
  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;
  conz1 = (1.0-c1c5);

  /* metric terms: 1/h^2, 1/(2h), 1/h per direction */
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;
  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;
  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;

  /* per-equation diffusion factors */
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;
  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;
  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;

  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);

  /* fourth-order artificial dissipation coefficient and multiples */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;

  /* time-step scaled metric terms */
  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;
  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;

  dtdssp = dt*dssp;
  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;

  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 = c3c4*tz3;

  /* diffusion-times-metric products per direction and equation */
  dx1tx1 = dx1*tx1;
  dx2tx1 = dx2*tx1;
  dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1;
  dx5tx1 = dx5*tx1;
  dy1ty1 = dy1*ty1;
  dy2ty1 = dy2*ty1;
  dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1;
  dy5ty1 = dy5*ty1;
  dz1tz1 = dz1*tz1;
  dz2tz1 = dz2*tz1;
  dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1;
  dz5tz1 = dz5*tz1;

  c2iv = 2.5;
  con43 = 4.0/3.0;
  con16 = 1.0/6.0;

  /* viscous flux coefficients per direction */
  xxcon1 = c3c4tx3*con43*tx3;
  xxcon2 = c3c4tx3*tx3;
  xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3;
  xxcon5 = c3c4tx3*c1c5*tx3;
  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;
  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
c verification routine: compares the computed residual norms (xcr)
c and solution-error norms (xce) against stored reference values for
c the known problem classes (S, W, A, B, C).
c on return, *cclass holds the detected class ('U' if the grid size /
c step count match no known class) and *verified is TRUE only if dt
c and all ten norms agree with the references within epsilon.
c-------------------------------------------------------------------*/
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level for the relative differences below
c-------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/* rhs was scaled by dt in compute_rhs' caller; undo for comparison */
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*cclass = 'U';
*verified = TRUE;
/* default references of 1.0 keep the relative-difference division
   below well-defined even when the class is unknown */
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c-------------------------------------------------------------------*/
if (grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 60) {
*cclass = 'S';
dtref = 1.0e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
/*--------------------------------------------------------------------
c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 24 &&
grid_points[1] == 24 &&
grid_points[2] == 24 &&
no_time_steps == 200) {
*cclass = 'W';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 200) {
*cclass = 'A';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 200 time steps,
c with DT = 3.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 200) {
*cclass = 'B';
dtref = 3.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 200 time steps,
c with DT = 1.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 200) {
*cclass = 'C';
dtref = 1.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is one of
c 12X12X12, 24X24X24, 64X64X64, 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
/* dtref is only read on this branch, where it was necessarily set */
if (*cclass != 'U') {
printf(" Verification being performed for class %1c\n", *cclass);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*cclass = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
}
}
if (*cclass != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*cclass == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
}
}
if (*cclass == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified == TRUE) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
c
c Performs line solves in X direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c the three phases must run in this order: build the block-tridiagonal
c operator (lhsx), forward-eliminate it (x_solve_cell), then back
c substitute (x_backsubstitute).
c-------------------------------------------------------------------*/
lhsx();
x_solve_cell();
x_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
c back substitution sweep in x: plane i = grid_points[0]-1 is already
c solved, so walk the planes from high i to low i and fold the solved
c neighbor into each one:  rhs(i) <- rhs(i) - C(i)*rhs(i+1).
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  /* the i loop carries a dependence (plane i needs plane i+1), so
     only the j loop is work-shared among threads */
  for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for private(k,m,n)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          double acc;
          acc = rhs[i][j][k][m];
          /* subtract row m of the CC block times the solved vector */
          for (n = 0; n < BLOCK_SIZE; n++) {
            acc = acc - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];
          }
          rhs[i][j][k][m] = acc;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(IMAX) and rhs'(IMAX) will be sent to next cell
c-------------------------------------------------------------------*/
  int i, j, k, isize;
  isize = grid_points[0]-1;
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(0,j,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[0][j][k][BB],
                lhs[0][j][k][CC],
                rhs[0][j][k] );
    }
  }
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (i = 1; i < isize; i++) {
#pragma omp for private(k)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(i) = rhs(i) - A*rhs(i-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i-1][j][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(i) = B(i) - C(i-1)*A(i)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i-1][j][k][CC],
                   lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(isize) = rhs(isize) - A*rhs(isize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[isize][j][k][AA],
                 rhs[isize-1][j][k], rhs[isize][j][k]);
/*--------------------------------------------------------------------
c B(isize) = B(isize) - C(isize-1)*A(isize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[isize][j][k][AA],
                 lhs[isize-1][j][k][CC],
                 lhs[isize][j][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs() by b_inverse() and copy to rhs
c (use isize explicitly here: the original code indexed with i, which
c was only correct because the preceding loop exits with i == isize)
c-------------------------------------------------------------------*/
      binvrhs( lhs[isize][j][k][BB],
               rhs[isize][j][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
c 5x5 block matrix-vector update: bvec = bvec - ablock*avec.
c the columns are subtracted left to right, matching the fully
c unrolled form, so the floating-point result is bit-identical.
c-------------------------------------------------------------------*/
  int row, col;
  for (row = 0; row < 5; row++) {
    double acc;
    acc = bvec[row];
    for (col = 0; col < 5; col++) {
      acc = acc - ablock[row][col]*avec[col];
    }
    bvec[row] = acc;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]) {
/*--------------------------------------------------------------------
c 5x5 block matrix update: cblock = cblock - ablock*bblock.
c loop order (column j outer, row i, then the k-sum innermost in
c ascending order) reproduces the unrolled original's operation
c sequence exactly, so results are bit-identical.
c-------------------------------------------------------------------*/
  int i, j, k;
  for (j = 0; j < 5; j++) {
    for (i = 0; i < 5; i++) {
      double acc;
      acc = cblock[i][j];
      for (k = 0; k < 5; k++) {
        acc = acc - ablock[i][k]*bblock[k][j];
      }
      cblock[i][j] = acc;
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination (no pivot search) on the 5x5 block lhs,
c applying the identical row operations to the 5x5 block c and the
c vector r.  On exit c holds lhs^-1 * c and r holds lhs^-1 * r.
c
c This is the loop form of the fully unrolled original; for each
c pivot p it normalizes row p (columns p+1..4 of lhs, all of c, r[p])
c and then eliminates rows q = 0..4, q != p, in ascending order -
c the exact operation sequence of the unrolled code, so results are
c bit-identical.  Column p of lhs is never revisited, as before.
c-------------------------------------------------------------------*/
  int p, q, col;
  double pivot, coeff;
  for (p = 0; p < 5; p++) {
    /* scale pivot row by 1/lhs[p][p] */
    pivot = 1.00/lhs[p][p];
    for (col = p+1; col < 5; col++) {
      lhs[p][col] = lhs[p][col]*pivot;
    }
    for (col = 0; col < 5; col++) {
      c[p][col] = c[p][col]*pivot;
    }
    r[p] = r[p]*pivot;
    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (col = p+1; col < 5; col++) {
        lhs[q][col] = lhs[q][col] - coeff*lhs[p][col];
      }
      for (col = 0; col < 5; col++) {
        c[q][col] = c[q][col] - coeff*c[p][col];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
c Gauss-Jordan elimination (no pivot search) on the 5x5 block lhs,
c applying the same row operations to the vector r.  On exit r holds
c lhs^-1 * r.
c
c Loop form of the fully unrolled original: for each pivot p the row
c is normalized (lhs columns p+1..4 and r[p]), then rows q = 0..4
c with q != p are eliminated in ascending order - the exact unrolled
c sequence, so floating-point results are bit-identical.
c-------------------------------------------------------------------*/
  int p, q, col;
  double pivot, coeff;
  for (p = 0; p < 5; p++) {
    /* scale pivot row by 1/lhs[p][p] */
    pivot = 1.00/lhs[p][p];
    for (col = p+1; col < 5; col++) {
      lhs[p][col] = lhs[p][col]*pivot;
    }
    r[p] = r[p]*pivot;
    /* eliminate column p from every other row */
    for (q = 0; q < 5; q++) {
      if (q != p) {
        coeff = lhs[q][p];
        for (col = p+1; col < 5; col++) {
          lhs[q][col] = lhs[q][col] - coeff*lhs[p][col];
        }
        r[q] = r[q] - coeff*r[p];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
c Performs line solves in Y direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c the three phases must run in this order: build the block-tridiagonal
c operator (lhsy), forward-eliminate it (y_solve_cell), then back
c substitute (y_backsubstitute).
c-------------------------------------------------------------------*/
lhsy();
y_solve_cell();
y_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
c back substitution sweep in y: plane j = grid_points[1]-1 is already
c solved, so walk the planes from high j to low j and fold the solved
c neighbor into each one:  rhs(j) <- rhs(j) - C(j)*rhs(j+1).
c-------------------------------------------------------------------*/
  int i, j, k, m, n;
  /* the j loop carries a dependence (plane j needs plane j+1), so
     only the i loop is work-shared among threads */
  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for private(k,m,n)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          double acc;
          acc = rhs[i][j][k][m];
          /* subtract row m of the CC block times the solved vector */
          for (n = 0; n < BLOCK_SIZE; n++) {
            acc = acc - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
          }
          rhs[i][j][k][m] = acc;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(JMAX) and rhs'(JMAX) will be sent to next cell
c-------------------------------------------------------------------*/
int i, j, k, jsize;
jsize = grid_points[1]-1;
/* First plane (j = 0): only the diagonal block needs inverting. */
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][0][k][BB],
lhs[i][0][k][CC],
rhs[i][0][k] );
}
}
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
/* The j sweep is sequential (row j uses factored row j-1);
   i iterations are shared among the threads. */
for (j = 1; j < jsize; j++) {
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(j-1) from lhs_vector(j)
c
c rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j-1][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(j) = B(j) - C(j-1)*A(j)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j-1][k][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/* Last plane (j = jsize): no C block beyond it, so only rhs is updated. */
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][jsize][k][AA],
rhs[i][jsize-1][k], rhs[i][jsize][k]);
/*--------------------------------------------------------------------
c B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
c call matmul_sub(aa,i,jsize,k,c,
c $ cc,i,jsize-1,k,c,BB,i,jsize,k)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][jsize][k][AA],
lhs[i][jsize-1][k][CC],
lhs[i][jsize][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][jsize][k][BB],
rhs[i][jsize][k] );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs line solves in Z direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/
lhsz();              /* assemble the block-tridiagonal LHS for the Z sweep */
z_solve_cell();      /* forward (Gaussian) elimination over the cell */
z_backsubstitute();  /* back substitution to recover the solution vectors */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(ksize)=rhs(ksize)
c else assume U(ksize) is loaded in un pack backsub_info
c so just use it
c after call u(kstart) will be sent to next cell
c-------------------------------------------------------------------*/
int i, j, k, m, n;
/* Unlike the Y version, i is the outermost (parallel) loop here; the
   downward k sweep is sequential inside each (i,j) column because
   rhs(k) depends on the already-solved rhs(k+1). */
#pragma omp for private(j,k,m,n)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = grid_points[2]-2; k >= 0; k--) {
for (m = 0; m < BLOCK_SIZE; m++) {
for (n = 0; n < BLOCK_SIZE; n++) {
/* rhs(k) -= C(k) * rhs(k+1) */
rhs[i][j][k][m] = rhs[i][j][k][m]
- lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c performs Gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c-------------------------------------------------------------------*/
int i,j,k,ksize;
ksize = grid_points[2]-1;
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
/* First plane (k = 0): only the diagonal block needs inverting. */
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c multiply c(i,j,0) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][0][BB],
lhs[i][j][0][CC],
rhs[i][j][0] );
}
}
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
/* The k sweep is sequential (row k uses factored row k-1);
   i iterations are shared among the threads. */
for (k = 1; k < ksize; k++) {
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(k-1) from lhs_vector(k)
c
c rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j][k-1], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(k) = B(k) - C(k-1)*A(k)
c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j][k-1][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/*--------------------------------------------------------------------
c Now finish up special cases for last cell
c-------------------------------------------------------------------*/
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][ksize][AA],
rhs[i][j][ksize-1], rhs[i][j][ksize]);
/*--------------------------------------------------------------------
c B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
c call matmul_sub(aa,i,j,ksize,c,
c $ cc,i,j,ksize-1,c,BB,i,j,ksize)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][ksize][AA],
lhs[i][j][ksize-1][CC],
lhs[i][j][ksize][BB]);
/*--------------------------------------------------------------------
c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][j][ksize][BB],
rhs[i][j][ksize] );
}
}
}
|
dataset.h | #ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/meta.h>
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <vector>
#include <utility>
#include <functional>
#include <string>
#include <unordered_set>
#include <mutex>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
* \brief This class is used to store some meta (non-feature) data for training data,
* e.g. labels, weights, initial scores, query-level information.
*
* Some details:
* 1. Label, used for training.
* 2. Weights, weights of records, optional.
* 3. Query Boundaries, necessary for lambdarank.
* The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
* 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist):
* the weight of the i-th query is sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1]) / (query_boundaries[i+1] - query_boundaries[i]).
* 5. Initial score, optional. If existing, the model will boost from this score, otherwise it will start from 0.
*/
class Metadata {
public:
/*!
* \brief Null constructor
*/
Metadata();
/*!
* \brief Initialization will load query-level information, since it is needed for sampling data
* \param data_filename Filename of data
* \param initscore_file Filename of initial score
*/
void Init(const char* data_filename, const char* initscore_file);
/*!
* \brief Initialize as a subset of another Metadata object
* \param metadata Metadata object to take the subset from
* \param used_indices Indices of the records to keep
* \param num_used_indices Number of entries in used_indices
*/
void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
/*!
* \brief Initialize from binary memory
* \param memory Pointer to memory
*/
void LoadFromMemory(const void* memory);
/*! \brief Destructor */
~Metadata();
/*!
* \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
* \param num_data Number of training data
* \param weight_idx Index of weight column, < 0 means doesn't exist
* \param query_idx Index of query id column, < 0 means doesn't exist
*/
void Init(data_size_t num_data, int weight_idx, int query_idx);
/*!
* \brief Partition label by used indices
* \param used_indices Indices of locally used data
*/
void PartitionLabel(const std::vector<data_size_t>& used_indices);
/*!
* \brief Partition meta data according to local used indices if needed
* \param num_all_data Number of total training data, including other machines' data on parallel learning
* \param used_data_indices Indices of local used training data
*/
void CheckOrPartition(data_size_t num_all_data,
const std::vector<data_size_t>& used_data_indices);
/*! \brief Set labels from an external buffer of length len */
void SetLabel(const label_t* label, data_size_t len);
/*! \brief Set per-record weights from an external buffer of length len */
void SetWeights(const label_t* weights, data_size_t len);
/*! \brief Set per-record query ids from an external buffer of length len */
void SetQuery(const data_size_t* query, data_size_t len);
/*!
* \brief Set initial scores
* \param init_score Initial scores, this class will manage memory for init_score.
* \param len Number of entries in init_score
*/
void SetInitScore(const double* init_score, data_size_t len);
/*!
* \brief Save binary data to file
* \param writer File writer to write to
*/
void SaveBinaryToFile(const VirtualFileWriter* writer) const;
/*!
* \brief Get sizes in byte of this object
*/
size_t SizesInByte() const;
/*!
* \brief Get pointer of label
* \return Pointer of label
*/
inline const label_t* label() const { return label_.data(); }
/*!
* \brief Set label for one record
* \param idx Index of this record
* \param value Label value of this record
*/
inline void SetLabelAt(data_size_t idx, label_t value) {
label_[idx] = value;
}
/*!
* \brief Set Weight for one record
* \param idx Index of this record
* \param value Weight value of this record
*/
inline void SetWeightAt(data_size_t idx, label_t value) {
weights_[idx] = value;
}
/*!
* \brief Set Query Id for one record
* \param idx Index of this record
* \param value Query Id value of this record
*/
inline void SetQueryAt(data_size_t idx, data_size_t value) {
queries_[idx] = static_cast<data_size_t>(value);
}
/*!
* \brief Get weights; if they do not exist, returns nullptr
* \return Pointer of weights
*/
inline const label_t* weights() const {
if (!weights_.empty()) {
return weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get data boundaries on queries; if they do not exist, returns nullptr
* we assume data will order by query,
* the interval of [query_boundaries[i], query_boundaries[i+1])
* is the data indices for query i.
* \return Pointer of data boundaries on queries
*/
inline const data_size_t* query_boundaries() const {
if (!query_boundaries_.empty()) {
return query_boundaries_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get Number of queries
* \return Number of queries
*/
inline data_size_t num_queries() const { return num_queries_; }
/*!
* \brief Get weights for queries; if they do not exist, returns nullptr
* \return Pointer of weights for queries
*/
inline const label_t* query_weights() const {
if (!query_weights_.empty()) {
return query_weights_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get initial scores; if they do not exist, returns nullptr
* \return Pointer of initial scores
*/
inline const double* init_score() const {
if (!init_score_.empty()) {
return init_score_.data();
} else {
return nullptr;
}
}
/*!
* \brief Get size of initial scores
*/
inline int64_t num_init_score() const { return num_init_score_; }
/*! \brief Disable copy */
Metadata& operator=(const Metadata&) = delete;
/*! \brief Disable copy */
Metadata(const Metadata&) = delete;
private:
/*! \brief Load initial scores from file */
void LoadInitialScore(const char* initscore_file);
/*! \brief Load weights from file */
void LoadWeights();
/*! \brief Load query boundaries from file */
void LoadQueryBoundaries();
/*! \brief Load query weights */
void LoadQueryWeights();
/*! \brief Filename of current data */
std::string data_filename_;
/*! \brief Number of data */
data_size_t num_data_;
/*! \brief Number of weights, used to check correctness of the weight file */
data_size_t num_weights_;
/*! \brief Label data */
std::vector<label_t> label_;
/*! \brief Weights data */
std::vector<label_t> weights_;
/*! \brief Query boundaries */
std::vector<data_size_t> query_boundaries_;
/*! \brief Query weights */
std::vector<label_t> query_weights_;
/*! \brief Number of queries */
data_size_t num_queries_;
/*! \brief Number of initial scores, used to check correctness of the initial-score file */
int64_t num_init_score_;
/*! \brief Initial score */
std::vector<double> init_score_;
/*! \brief Queries data */
std::vector<data_size_t> queries_;
/*! \brief Mutex for thread-safe calls */
std::mutex mutex_;
/*! \brief True if weights were loaded from a side file (vs set via API) */
bool weight_load_from_file_;
/*! \brief True if query boundaries were loaded from a side file */
bool query_load_from_file_;
/*! \brief True if initial scores were loaded from a side file */
bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
public:
/*! \brief virtual destructor */
virtual ~Parser() {}
/*!
* \brief Parse one line with label
* \param str One line record, string format, should end with '\0'
* \param out_features Output columns, stored as (column_idx, value) pairs
* \param out_label Label will be stored here if it exists
*/
virtual void ParseOneLine(const char* str,
std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
/*! \brief Total number of columns in the input, including the label column */
virtual int TotalColumns() const = 0;
/*!
* \brief Create an object of parser, will auto-choose the format depending on the file
* \param filename One filename of data
* \param header True if the file has a header line
* \param num_features Pass num_features of this data file if you know, <=0 means don't know
* \param label_idx index of label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
* which is used for training or validation
*/
class Dataset {
public:
friend DatasetLoader;
/*! \brief Null constructor */
LIGHTGBM_EXPORT Dataset();
/*! \brief Construct with a fixed number of rows */
LIGHTGBM_EXPORT Dataset(data_size_t num_data);
/*! \brief Build the feature groups from per-feature bin mappers and sampled column data */
void Construct(
std::vector<std::unique_ptr<BinMapper>>& bin_mappers,
int** sample_non_zero_indices,
const int* num_per_col,
size_t total_sample_cnt,
const Config& io_config);
/*! \brief Destructor */
LIGHTGBM_EXPORT ~Dataset();
/*! \brief Check that another dataset has compatible feature layout and binning (e.g. a validation set vs the training set) */
LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
if (num_features_ != other.num_features_) {
return false;
}
if (num_total_features_ != other.num_total_features_) {
return false;
}
if (label_idx_ != other.label_idx_) {
return false;
}
for (int i = 0; i < num_features_; ++i) {
if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
return false;
}
}
return true;
}
/*! \brief Push one dense row (values for every raw column) into the bin data; no-op after FinishLoad */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
if (is_finish_load_) { return; }
for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
int feature_idx = used_feature_map_[i];
// feature_idx < 0 means the raw column was dropped (e.g. constant) and is skipped
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
}
}
}
/*! \brief Push one sparse row ((column_idx, value) pairs) into the bin data; no-op after FinishLoad */
inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
if (is_finish_load_) { return; }
for (auto& inner_data : feature_values) {
if (inner_data.first >= num_total_features_) { continue; }
int feature_idx = used_feature_map_[inner_data.first];
if (feature_idx >= 0) {
const int group = feature2group_[feature_idx];
const int sub_feature = feature2subfeature_[feature_idx];
feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
}
}
}
/*! \brief Push a single value addressed directly by (group, sub_feature) */
inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
}
/*! \brief Map an inner (used) feature index back to its raw column index */
inline int RealFeatureIndex(int fidx) const {
return real_feature_idx_[fidx];
}
/*! \brief Map a raw column index to the inner feature index (-1 if unused) */
inline int InnerFeatureIndex(int col_idx) const {
return used_feature_map_[col_idx];
}
/*! \brief Get the feature group that contains an inner feature */
inline int Feature2Group(int feature_idx) const {
return feature2group_[feature_idx];
}
/*! \brief Get the sub-feature index inside its group
    (NOTE(review): misspelling "Feture" kept for API compatibility) */
inline int Feture2SubFeature(int feature_idx) const {
return feature2subfeature_[feature_idx];
}
/*! \brief Get the cumulative bin offset at which a group's bins start */
inline uint64_t GroupBinBoundary(int group_idx) const {
return group_bin_boundaries_[group_idx];
}
/*! \brief Total number of bins over all feature groups */
inline uint64_t NumTotalBin() const {
return group_bin_boundaries_.back();
}
/*! \brief Raw column indices of all features that are actually used */
inline std::vector<int> ValidFeatureIndices() const {
std::vector<int> ret;
for (int i = 0; i < num_total_features_; ++i) {
if (used_feature_map_[i] >= 0) {
ret.push_back(i);
}
}
return ret;
}
/*! \brief Resize the dataset to num_data rows */
void ReSize(data_size_t num_data);
/*! \brief Copy the given row subset (and optionally metadata) from fullset into this dataset */
void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);
/*! \brief Finish the loading phase; after this, Push* calls become no-ops */
LIGHTGBM_EXPORT void FinishLoad();
LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);
LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);
LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);
LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);
LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);
/*!
* \brief Save current dataset into binary file, will save to "filename.bin"
*/
LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);
/*! \brief Dump the dataset in a human-readable text format */
LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);
/*! \brief Copy the feature mapping (groups, bin mappers) from another dataset */
LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);
/*! \brief Create this dataset as a validation set aligned with the given training dataset */
LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);
/*! \brief Construct gradient/hessian histograms for the used features over the given data indices */
void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const data_size_t* data_indices, data_size_t num_data,
int leaf_idx,
std::vector<std::unique_ptr<OrderedBin>>& ordered_bins,
const score_t* gradients, const score_t* hessians,
score_t* ordered_gradients, score_t* ordered_hessians,
bool is_constant_hessian,
HistogramBinEntry* histogram_data) const;
/*! \brief Fix the default-bin entry of a feature's histogram from the aggregate sums */
void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
HistogramBinEntry* data) const;
/*! \brief Partition data_indices into <= / > threshold sides; returns the split point */
inline data_size_t Split(int feature,
const uint32_t* threshold, int num_threshold, bool default_left,
data_size_t* data_indices, data_size_t num_data,
data_size_t* lte_indices, data_size_t* gt_indices) const {
const int group = feature2group_[feature];
const int sub_feature = feature2subfeature_[feature];
return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
}
/*! \brief Bin offset of a feature inside its group (first sub-feature skips bin 0) */
inline int SubFeatureBinOffset(int i) const {
const int sub_feature = feature2subfeature_[i];
if (sub_feature == 0) {
return 1;
} else {
return 0;
}
}
/*! \brief Number of bins of one feature */
inline int FeatureNumBin(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
}
/*! \brief Monotone constraint of one feature (0 when no constraints configured) */
inline int8_t FeatureMonotone(int i) const {
if (monotone_types_.empty()) {
return 0;
} else {
return monotone_types_[i];
}
}
/*! \brief Penalty factor of one feature (1 when no penalties configured)
    (NOTE(review): misspelling "Penalte" kept for API compatibility) */
inline double FeaturePenalte(int i) const {
if (feature_penalty_.empty()) {
return 1;
} else {
return feature_penalty_[i];
}
}
/*! \brief True if any feature has a non-zero monotone constraint */
bool HasMonotone() const {
if (monotone_types_.empty()) {
return false;
} else {
for (size_t i = 0; i < monotone_types_.size(); ++i) {
if (monotone_types_[i] != 0) {
return true;
}
}
return false;
}
}
/*! \brief Total number of bins of one feature group */
inline int FeatureGroupNumBin(int group) const {
return feature_groups_[group]->num_total_bin_;
}
/*! \brief Bin mapper of one feature */
inline const BinMapper* FeatureBinMapper(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature].get();
}
/*! \brief Bin data of the group that contains feature i */
inline const Bin* FeatureBin(int i) const {
const int group = feature2group_[i];
return feature_groups_[group]->bin_data_.get();
}
/*! \brief Bin data of one feature group */
inline const Bin* FeatureGroupBin(int group) const {
return feature_groups_[group]->bin_data_.get();
}
/*! \brief True if the group is stored sparsely */
inline bool FeatureGroupIsSparse(int group) const {
return feature_groups_[group]->is_sparse_;
}
/*! \brief Iterator over the bins of one feature */
inline BinIterator* FeatureIterator(int i) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->SubFeatureIterator(sub_feature);
}
/*! \brief Iterator over the bins of one feature group */
inline BinIterator* FeatureGroupIterator(int group) const {
return feature_groups_[group]->FeatureGroupIterator();
}
/*! \brief Convert a bin threshold of feature i back to a raw feature value */
inline double RealThreshold(int i, uint32_t threshold) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
}
// given a real threshold, find the closest threshold bin
inline uint32_t BinThreshold(int i, double threshold_double) const {
const int group = feature2group_[i];
const int sub_feature = feature2subfeature_[i];
return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
}
/*! \brief Create per-group ordered-bin structures (entries may be null for groups without ordered bins) */
inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
ordered_bins->resize(num_groups_);
OMP_INIT_EX();
#pragma omp parallel for schedule(guided)
for (int i = 0; i < num_groups_; ++i) {
OMP_LOOP_EX_BEGIN();
ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
/*!
* \brief Get meta data pointer
* \return Pointer of meta data
*/
inline const Metadata& metadata() const { return metadata_; }
/*! \brief Get Number of used features */
inline int num_features() const { return num_features_; }
/*! \brief Get Number of feature groups */
inline int num_feature_groups() const { return num_groups_;}
/*! \brief Get Number of total features */
inline int num_total_features() const { return num_total_features_; }
/*! \brief Get the index of label column */
inline int label_idx() const { return label_idx_; }
/*! \brief Get names of current data set */
inline const std::vector<std::string>& feature_names() const { return feature_names_; }
/*! \brief Set feature names; spaces are replaced with underscores */
inline void set_feature_names(const std::vector<std::string>& feature_names) {
if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
Log::Fatal("Size of feature_names error, should equal with total number of features");
}
feature_names_ = std::vector<std::string>(feature_names);
// replace ' ' in feature_names with '_'
bool spaceInFeatureName = false;
for (auto& feature_name : feature_names_) {
if (feature_name.find(' ') != std::string::npos) {
spaceInFeatureName = true;
std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
}
}
if (spaceInFeatureName) {
Log::Warning("Find whitespaces in feature_names, replace with underlines");
}
}
/*! \brief Per-raw-column bin info strings ("none" for unused columns) */
inline std::vector<std::string> feature_infos() const {
std::vector<std::string> bufs;
for (int i = 0; i < num_total_features_; i++) {
int fidx = used_feature_map_[i];
if (fidx == -1) {
bufs.push_back("none");
} else {
const auto bin_mapper = FeatureBinMapper(fidx);
bufs.push_back(bin_mapper->bin_info());
}
}
return bufs;
}
/*! \brief Re-apply configuration from a parameter string */
void ResetConfig(const char* parameters);
/*! \brief Get Number of data */
inline data_size_t num_data() const { return num_data_; }
/*! \brief Disable copy */
Dataset& operator=(const Dataset&) = delete;
/*! \brief Disable copy */
Dataset(const Dataset&) = delete;
/*! \brief Append the features of another dataset to this one */
void addFeaturesFrom(Dataset* other);
private:
std::string data_filename_;
/*! \brief Store used features */
std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
/*! \brief Mapper from real feature index to used index*/
std::vector<int> used_feature_map_;
/*! \brief Number of used features*/
int num_features_;
/*! \brief Number of total features*/
int num_total_features_;
/*! \brief Number of total data*/
data_size_t num_data_;
/*! \brief Store some label level data*/
Metadata metadata_;
/*! \brief index of label column */
int label_idx_ = 0;
/*! \brief Threshold for treating a feature as a sparse feature */
double sparse_threshold_;
/*! \brief store feature names */
std::vector<std::string> feature_names_;
/*! \brief Token written at the head of binary files for validation */
static const char* binary_file_token;
/*! \brief Number of feature groups */
int num_groups_;
std::vector<int> real_feature_idx_;
std::vector<int> feature2group_;
std::vector<int> feature2subfeature_;
std::vector<uint64_t> group_bin_boundaries_;
std::vector<int> group_feature_start_;
std::vector<int> group_feature_cnt_;
std::vector<int8_t> monotone_types_;
std::vector<double> feature_penalty_;
/*! \brief True once FinishLoad has been called */
bool is_finish_load_;
int max_bin_;
int bin_construct_sample_cnt_;
int min_data_in_bin_;
bool use_missing_;
bool zero_as_missing_;
};
} // namespace LightGBM
#endif  // LIGHTGBM_DATASET_H_
|
contact_utilities.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_CONTACT_UTILITIES)
#define KRATOS_CONTACT_UTILITIES
// System includes
#include <algorithm>
#include <limits>
#include <vector>
// External includes
// Project includes
#include "utilities/openmp_utils.h"
#include "utilities/math_utils.h"
#include "contact_structural_mechanics_application_variables.h"
#include "includes/model_part.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
/**
* @class ContactUtilities
* @ingroup ContactStructuralMechanicsApplication
* @brief This class includes some utilities used for contact computations
* @author Vicente Mataix Ferrandiz
*/
class ContactUtilities
{
public:
///@name Type Definitions
///@{
/// Pointer definition of MortarUtilities
KRATOS_CLASS_POINTER_DEFINITION( ContactUtilities );
// Some geometrical definitions
typedef Node<3> NodeType;
typedef Point::CoordinatesArrayType CoordinatesArrayType;
/// Definition of geometries
typedef Geometry<NodeType> GeometryType;
/// The containers of the components of the model parts
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// Index type definition
typedef std::size_t IndexType;
/// Size type definition
typedef std::size_t SizeType;
///@}
///@name Life Cycle
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
///@}
///@name Operations
///@{
/**
 * @brief This function computes the relative size of the mesh, i.e. the ratio
 * between the largest and the smallest nodal H of the model part
 * @param rModelPart The modelpart to compute
 * @return The ratio max(NODAL_H) / min(NODAL_H)
 */
static inline double CalculateRelativeSizeMesh(ModelPart& rModelPart)
{
    const double max_nodal_h = CalculateMaxNodalH(rModelPart);
    const double min_nodal_h = CalculateMinimalNodalH(rModelPart);
    return max_nodal_h / min_nodal_h;
}
/**
 * @brief This method computes the maximal nodal H
 * @param rModelPart The modelpart to compute
 * @return The maximum value of NODAL_H over all nodes of the model part
 */
static inline double CalculateMaxNodalH(ModelPart& rModelPart)
{
    // We iterate over the nodes
    NodesArrayType& r_nodes_array = rModelPart.Nodes();
    const auto it_node_begin = r_nodes_array.begin();
    // Per-thread buffer: each thread accumulates its own maximum and the
    // buffers are combined afterwards (avoids relying on OpenMP 3.1
    // min/max reductions, which older compilers do not support)
    // NOTE(review): buffers start at 0.0, which assumes NODAL_H >= 0 — confirm
    const int num_threads = OpenMPUtils::GetNumThreads();
    std::vector<double> max_vector(num_threads, 0.0);
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl;
        const double nodal_h = it_node->FastGetSolutionStepValue(NODAL_H);
        const int id = OpenMPUtils::ThisThread();
        if (nodal_h > max_vector[id])
            max_vector[id] = nodal_h;
    }
    return *std::max_element(max_vector.begin(), max_vector.end());
}
/**
 * @brief This method computes the mean nodal H
 * @param rModelPart The modelpart to compute
 * @return The arithmetic mean of NODAL_H over all nodes of the model part
 */
static inline double CalculateMeanNodalH(ModelPart& rModelPart)
{
    // We iterate over the nodes
    NodesArrayType& r_nodes_array = rModelPart.Nodes();
    const auto it_node_begin = r_nodes_array.begin();
    // Accumulate the sum of all nodal H values in parallel
    double sum_nodal_h = 0.0;
    #pragma omp parallel for reduction(+:sum_nodal_h)
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl;
        sum_nodal_h += it_node->FastGetSolutionStepValue(NODAL_H);
    }
    return sum_nodal_h/static_cast<double>(r_nodes_array.size());
}
/**
 * @brief This method computes the minimal nodal H
 * @param rModelPart The modelpart to compute
 * @return The minimum value of NODAL_H over all nodes of the model part
 */
static inline double CalculateMinimalNodalH(ModelPart& rModelPart)
{
    // We iterate over the nodes
    NodesArrayType& r_nodes_array = rModelPart.Nodes();
    const auto it_node_begin = r_nodes_array.begin();
    // Per-thread buffer: each thread accumulates its own minimum and the
    // buffers are combined afterwards (avoids relying on OpenMP 3.1
    // min/max reductions, which older compilers do not support).
    // BUGFIX: the buffers must start at +max and be updated with "<";
    // the previous code initialized them to 0.0 and compared with ">",
    // which computed per-thread maxima and returned a wrong minimum.
    const int num_threads = OpenMPUtils::GetNumThreads();
    std::vector<double> min_vector(num_threads, std::numeric_limits<double>::max());
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        KRATOS_DEBUG_ERROR_IF_NOT(it_node->SolutionStepsDataHas(NODAL_H)) << "ERROR:: NODAL_H not added" << std::endl;
        const double nodal_h = it_node->FastGetSolutionStepValue(NODAL_H);
        const int id = OpenMPUtils::ThisThread();
        if (nodal_h < min_vector[id])
            min_vector[id] = nodal_h;
    }
    return *std::min_element(min_vector.begin(), min_vector.end());
}
/**
* @brief This function scales the points according to a factor (to increase the bounding box)
* @param rPointToScale The point to scale
* @param rNormal The normal of the point
* @param LengthSearch The factor considered to "grow" the node
*/
template<class TPointType>
static inline void ScaleNode(
TPointType& rPointToScale,
const array_1d<double, 3>& rNormal,
const double LengthSearch
)
{
noalias(rPointToScale.Coordinates()) = rPointToScale.Coordinates() + rNormal * LengthSearch;
}
/**
 * @brief Calculates the distance between nodes
 * @param rPointOrigin The first node
 * @param rPointDestiny The second node
 */
static inline double DistancePoints(
    const GeometryType::CoordinatesArrayType& rPointOrigin,
    const GeometryType::CoordinatesArrayType& rPointDestiny
    )
{
    // Euclidean distance between the two 3D coordinate triplets
    double squared_distance = 0.0;
    for (unsigned int i = 0; i < 3; ++i) {
        const double diff = rPointOrigin[i] - rPointDestiny[i];
        squared_distance += diff * diff;
    }
    return std::sqrt(squared_distance);
}
/**
 * @brief It calculates the center updated in u_n+1 or u_n+1/2
 * @param rModelPart The modelpart to update
 * @param DeltaTime The increment of time considered
 * @param HalfJump If the jump is just half dt
 */
static inline void ComputeStepJump(
    ModelPart& rModelPart,
    const double DeltaTime,
    const bool HalfJump = true
    )
{
    // Time integration constants: halved when only half a step is jumped
    const double velocity_constant = HalfJump ? 0.25 : 0.5;
    const double acceleration_constant = HalfJump ? 0.125 : 0.5;
    // Iterate over the nodes
    NodesArrayType& r_nodes_array = rModelPart.Nodes();
    const auto it_node_begin = r_nodes_array.begin();
    // Predict the displacement increment of every node in parallel
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        const array_1d<double, 3>& r_current_velocity = it_node->FastGetSolutionStepValue(VELOCITY);
        const array_1d<double, 3>& r_previous_velocity = it_node->FastGetSolutionStepValue(VELOCITY, 1);
        const array_1d<double, 3>& r_previous_acceleration = it_node->FastGetSolutionStepValue(ACCELERATION, 1);
        // delta_u = c_v * dt * (v_{n+1} + v_n) + c_a * dt^2 * a_n
        array_1d<double, 3> new_delta_disp = velocity_constant * DeltaTime * (r_current_velocity + r_previous_velocity) + acceleration_constant * std::pow(DeltaTime, 2) * r_previous_acceleration;
        // Fixed DOFs do not move
        if (it_node->IsFixed(DISPLACEMENT_X)) new_delta_disp[0] = 0.0;
        if (it_node->IsFixed(DISPLACEMENT_Y)) new_delta_disp[1] = 0.0;
        if (it_node->IsFixed(DISPLACEMENT_Z)) new_delta_disp[2] = 0.0;
        it_node->SetValue(DELTA_COORDINATES, new_delta_disp);
    }
}
/**
 * @brief It checks the activity of the current contact simulation
 * @param rModelPart The modelpart to check the activity
 * @param ThrowError If an error is thrown
 */
static inline bool CheckActivity(
    ModelPart& rModelPart,
    const bool ThrowError = true
    )
{
    // Iterate over the nodes
    NodesArrayType& r_nodes_array = rModelPart.Nodes();
    const auto it_node_begin = r_nodes_array.begin();
    // Count the slave nodes currently flagged ACTIVE
    IndexType aux_check = 0;
    #pragma omp parallel for reduction(+:aux_check)
    for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
        auto it_node = it_node_begin + i;
        if (it_node->Is(SLAVE) && it_node->Is(ACTIVE)) {
            ++aux_check;
        }
    }
    const bool is_active = (aux_check > 0);
    KRATOS_ERROR_IF(ThrowError && !is_active) << "CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
    return is_active;
}
/**
 * @brief It computes the explicit contributions of the conditions
 * @param rModelPart The modelpart to update
 */
static inline void ComputeExplicitContributionConditions(ModelPart& rModelPart)
{
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
    KRATOS_TRACE_IF("Empty model part", r_conditions_array.size() == 0) << "YOUR COMPUTING CONTACT MODEL PART IS EMPTY" << std::endl;
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    const auto it_cond_begin = r_conditions_array.begin();
    const int number_of_conditions = static_cast<int>(r_conditions_array.size());
    // Each condition adds its explicit contribution independently
    #pragma omp parallel for
    for(int i = 0; i < number_of_conditions; ++i) {
        (it_cond_begin + i)->AddExplicitContribution(r_process_info);
    }
}
/**
 * @brief It activates the conditions with active nodes
 * @param rModelPart The modelpart to check
 */
static inline void ActivateConditionWithActiveNodes(ModelPart& rModelPart)
{
    ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
    KRATOS_TRACE_IF("Empty model part", r_conditions_array.size() == 0) << "YOUR COMPUTING CONTACT MODEL PART IS EMPTY" << std::endl;
    const auto it_cond_begin = r_conditions_array.begin();
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
        auto it_cond = it_cond_begin + i;
        GeometryType& r_geometry = it_cond->GetGeometry();
        // A condition is active as soon as one of its nodes is active
        bool is_active = false;
        for ( IndexType i_node = 0; i_node < r_geometry.size(); ++i_node ) {
            if (r_geometry[i_node].Is(ACTIVE)) {
                is_active = true;
                break;
            }
        }
        it_cond->Set(ACTIVE, is_active);
    }
}
/**
 * @brief It calculates the center updated in u_n+1/2
 * @param rThisGeometry The geometry to calculate
 * @return point: The center in u_n+1/2 (Newmark)
 */
static inline array_1d<double, 3> GetHalfJumpCenter(GeometryType& rThisGeometry)
{
    array_1d<double, 3> center = (rThisGeometry.Center()).Coordinates();
    // Evaluate the shape functions at the geometric center
    Vector shape_functions;
    GeometryType::CoordinatesArrayType local_point;
    rThisGeometry.PointLocalCoordinates( local_point, center );
    rThisGeometry.ShapeFunctionsValues( shape_functions, local_point );
    KRATOS_DEBUG_ERROR_IF_NOT(rThisGeometry[0].Has(DELTA_COORDINATES)) << "Please call ComputeStepJump() first" << std::endl;
    // Interpolate the nodal DELTA_COORDINATES to the center and shift it
    const Vector interpolated_delta = prod(trans(GetVariableMatrix(rThisGeometry, DELTA_COORDINATES)), shape_functions);
    for (IndexType i = 0; i < interpolated_delta.size(); ++i)
        center[i] += interpolated_delta[i];
    return center;
}
private:
/**
 * @brief It calculates the matrix of a variable of a geometry
 * @param rNodes The geometry to calculate
 * @param rVarName The name of the variable to calculate
 * @return var_matrix: The matrix containing the variables of the geometry
 */
static inline Matrix GetVariableMatrix(
    const GeometryType& rNodes,
    const Variable<array_1d<double,3> >& rVarName
    )
{
    /* DEFINITIONS: one row per node, one column per working-space dimension */
    const SizeType number_of_nodes = rNodes.size();
    const SizeType dimension = rNodes.WorkingSpaceDimension();
    Matrix var_matrix(number_of_nodes, dimension);
    for (IndexType i_node = 0; i_node < number_of_nodes; ++i_node) {
        const array_1d<double, 3> value = rNodes[i_node].GetValue(rVarName);
        for (IndexType i_dof = 0; i_dof < dimension; ++i_dof) {
            var_matrix(i_node, i_dof) = value[i_dof];
        }
    }
    return var_matrix;
}
};// class ContactUtilities
}
#endif /* KRATOS_CONTACT_UTILITIES defined */
|
EvalOMP.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra. Eigen itself is part of the KDE project.
//
// Copyright (C) 2008 Gael Guennebaud <g.gael@free.fr>
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.
#ifndef EIGEN_EVAL_OMP_H
#define EIGEN_EVAL_OMP_H
/** \class EvalOMP
*
* \brief Parallel evaluation of an expression using OpenMP
*
* The template parameter Expression is the type of the expression that we are evaluating.
*
* This class is the return type of MatrixBase::evalOMP() and most of the time this is the
* only way it is used.
*
* Note that if OpenMP is not enabled, then this class is equivalent to Eval.
*
* \sa MatrixBase::evalOMP(), class Eval, MatrixBase::eval()
*/
// Traits specialization for EvalOMP: mirrors the traits of the wrapped
// expression type.
template<typename ExpressionType>
struct ei_traits<EvalOMP<ExpressionType> >
{
typedef typename ExpressionType::Scalar Scalar;
enum {
RowsAtCompileTime = ExpressionType::RowsAtCompileTime,
ColsAtCompileTime = ExpressionType::ColsAtCompileTime,
MaxRowsAtCompileTime = ExpressionType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = ExpressionType::MaxColsAtCompileTime,
// LazyBit is cleared: an EvalOMP holds an already-evaluated, concrete matrix.
Flags = ExpressionType::Flags & ~LazyBit
};
};
template<typename ExpressionType> class EvalOMP : ei_no_assignment_operator,
public Matrix< typename ExpressionType::Scalar,
ExpressionType::RowsAtCompileTime,
ExpressionType::ColsAtCompileTime,
ExpressionType::Flags,
ExpressionType::MaxRowsAtCompileTime,
ExpressionType::MaxColsAtCompileTime>
{
public:
/** The actual matrix type to evaluate to. This type can be used independently
* of the rest of this class to get the actual matrix type to evaluate and store
* the value of an expression.
*/
typedef Matrix<typename ExpressionType::Scalar,
ExpressionType::RowsAtCompileTime,
ExpressionType::ColsAtCompileTime,
ExpressionType::Flags,
ExpressionType::MaxRowsAtCompileTime,
ExpressionType::MaxColsAtCompileTime> MatrixType;
_EIGEN_GENERIC_PUBLIC_INTERFACE(EvalOMP, MatrixType)
#ifdef _OPENMP
// Copies 'other' coefficient-by-coefficient into this matrix, splitting
// the outer loop across OpenMP threads.
explicit EvalOMP(const ExpressionType& other)
: MatrixType(other.rows(), other.cols())
{
// NOTE(review): with default(none), a referenced variable such as 'other'
// normally has to be listed explicitly; only the ICC branch does so here.
// Confirm the bare form still compiles with strict OpenMP compilers.
#ifdef __INTEL_COMPILER
#pragma omp parallel default(none) shared(other)
#else
#pragma omp parallel default(none)
#endif
{
// Parallelize along the larger dimension so the "omp for" below has
// more outer iterations to distribute across threads.
if (this->cols()>this->rows())
{
#pragma omp for
for(int j = 0; j < this->cols(); j++)
for(int i = 0; i < this->rows(); i++)
this->coeffRef(i, j) = other.coeff(i, j);
}
else
{
#pragma omp for
for(int i = 0; i < this->rows(); i++)
for(int j = 0; j < this->cols(); j++)
this->coeffRef(i, j) = other.coeff(i, j);
}
}
}
#else
// Without OpenMP this class degenerates to a plain sequential evaluation.
explicit EvalOMP(const ExpressionType& other) : MatrixType(other) {}
#endif
};
/** Evaluates *this in a parallel fashion using OpenMP and returns the obtained matrix.
*
* Of course, it only makes sense to call this function for complex expressions, and/or
* large matrices (>32x32), \b and if there is no outer loop which can be parallelized.
*
* It is the responsibility of the user manage the OpenMP parameters, for instance:
* \code
* #include <omp.h>
* // ...
* omp_set_num_threads(omp_get_num_procs());
* \endcode
* You also need to enable OpenMP on your compiler (e.g., -fopenmp) during both compilation and linking.
*
* Note that if OpenMP is not enabled, then evalOMP() is equivalent to eval().
*
* \sa class EvalOMP, eval()
*/
template<typename Derived>
const EvalOMP<Derived> MatrixBase<Derived>::evalOMP() const
{
// Downcast to the concrete expression type and hand it to EvalOMP, which
// performs the actual (possibly parallel) evaluation.
const Derived& expression = *static_cast<const Derived*>(this);
return EvalOMP<Derived>(expression);
}
#endif // EIGEN_EVAL_OMP_H
|
mic_SH_to_spat.gen.c | /*
* Copyright (c) 2010-2015 Centre National de la Recherche Scientifique.
* written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France).
*
* nathanael.schaeffer@ujf-grenoble.fr
*
* This software is governed by the CeCILL license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/or redistribute the software under the terms of the CeCILL
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL license and that you accept its terms.
*
*/
# This file is meta-code for SHT.c (spherical harmonic transform).
# it is intended for "make" to generate C code for similar SHT functions,
# from one generic function + tags.
# > See Makefile and SHT.c
# Basically, there are tags at the beginning of lines that are information
# to keep or remove the line depending on the function to build.
# tags :
# Q : line for scalar transform
# V : line for vector transform (both spheroidal and toroidal)
# S : line for vector transfrom, spheroidal component
# T : line for vector transform, toroidal component.
static
3 void GEN3(_sy3,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, cplx *Slm, cplx *Tlm, cplx *BrF, cplx *BtF, cplx *BpF, const long int llim, const int imlim)
QX void GEN3(_sy1,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, cplx *BrF, const long int llim, const int imlim)
#ifndef SHT_GRAD
VX void GEN3(_sy2,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, cplx *Tlm, cplx *BtF, cplx *BpF, const long int llim, const int imlim)
#else
S void GEN3(_sy1s,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, cplx *BtF, cplx *BpF, const long int llim, const int imlim)
T void GEN3(_sy1t,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Tlm, cplx *BtF, cplx *BpF, const long int llim, const int imlim)
#endif
{
#ifndef SHT_AXISYM
Q #define qr(l) vall(creal(Ql[l-1]))
Q #define qi(l) vall(cimag(Ql[l-1]))
S #define sr(l) vall(creal(Sl[l-1]))
S #define si(l) vall(cimag(Sl[l-1]))
T #define tr(l) vall(creal(Tl[l-1]))
T #define ti(l) vall(cimag(Tl[l-1]))
V double m_1;
unsigned im;
#endif
unsigned m0, mstep;
long int nk,k,l,m;
double *alm, *al;
double *ct, *st;
Q cplx Ql[llim+1] SSE;
S cplx Sl[llim] SSE;
T cplx Tl[llim] SSE;
Q double rnr[NLAT_2 + NWAY*VSIZE2 -1] SSE;
Q double rsr[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double tnr[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double tsr[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double pnr[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double psr[NLAT_2 + NWAY*VSIZE2 -1] SSE;
#ifndef SHT_AXISYM
Q double rni[NLAT_2 + NWAY*VSIZE2 -1] SSE;
Q double rsi[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double tni[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double tsi[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double pni[NLAT_2 + NWAY*VSIZE2 -1] SSE;
V double psi[NLAT_2 + NWAY*VSIZE2 -1] SSE;
#endif
ct = shtns->ct; st = shtns->st;
nk = NLAT_2;
nk = ((unsigned)(nk+VSIZE2-1)) / VSIZE2;
// ACCESS PATTERN
const int k_inc = 1; const int m_inc = NLAT/2;
#ifndef _OPENMP
m0 = 0; mstep = 1;
#else
m0 = omp_get_thread_num();
mstep = omp_get_num_threads();
if (m0 == 0)
#endif
{ // im=0;
#ifdef SHT_GRAD
#ifndef SHT_AXISYM
S k=0; do { BpF[k]=0.0; } while(++k<NLAT_2);
T k=0; do { BtF[k]=0.0; } while(++k<NLAT_2);
#else
S if (BpF != NULL) { int k=0; do { BpF[k]=0.0; } while(++k<NLAT_2); }
T if (BtF != NULL) { int k=0; do { BtF[k]=0.0; } while(++k<NLAT_2); }
#endif
#endif
Q double* Ql0 = (double*) Ql;
S double* Sl0 = (double*) Sl;
T double* Tl0 = (double*) Tl;
l=1;
alm = shtns->alm;
Q Ql0[0] = (double) Qlm[0]; // l=0
do { // for m=0, compress the complex Q,S,T to double
Q Ql0[l] = creal( Qlm[l] ); // Ql[l+1] = (double) Qlm[l+1];
S Sl0[l-1] = creal( Slm[l] ); // Sl[l] = (double) Slm[l+1];
T Tl0[l-1] = creal( Tlm[l] ); // Tl[l] = (double) Tlm[l+1];
++l;
} while(l<=llim);
k=0;
do {
l=0; al = alm;
rnd cost[NWAY], y0[NWAY], y1[NWAY];
V rnd sint[NWAY], dy0[NWAY], dy1[NWAY];
Q rnd re[NWAY], ro[NWAY];
S rnd te[NWAY], to[NWAY];
T rnd pe[NWAY], po[NWAY];
for (int j=0; j<NWAY; ++j) {
cost[j] = vread(ct, j+k);
V sint[j] = -vread(st, j+k);
y0[j] = vall(al[0]);
V dy0[j] = vall(0.0);
Q re[j] = y0[j] * vall(Ql0[0]);
S to[j] = dy0[j];
T po[j] = dy0[j];
}
for (int j=0; j<NWAY; ++j) {
y1[j] = vall(al[0]*al[1]) * cost[j];
V dy1[j] = vall(al[0]*al[1]) * sint[j];
}
for (int j=0; j<NWAY; ++j) {
Q ro[j] = y1[j] * vall(Ql0[1]);
S te[j] = dy1[j] * vall(Sl0[0]);
T pe[j] = -dy1[j] * vall(Tl0[0]);
}
al+=2; l+=2;
while(l<llim) {
for (int j=0; j<NWAY; ++j) {
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*sint[j]) + vall(al[0])*dy0[j];
y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
}
for (int j=0; j<NWAY; ++j) {
Q re[j] += y0[j] * vall(Ql0[l]);
S to[j] += dy0[j] * vall(Sl0[l-1]);
T po[j] -= dy0[j] * vall(Tl0[l-1]);
}
for (int j=0; j<NWAY; ++j) {
V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*sint[j]) + vall(al[2])*dy1[j];
y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
}
for (int j=0; j<NWAY; ++j) {
Q ro[j] += y1[j] * vall(Ql0[l+1]);
S te[j] += dy1[j] * vall(Sl0[l]);
T pe[j] -= dy1[j] * vall(Tl0[l]);
}
al+=4; l+=2;
}
if (l==llim) {
for (int j=0; j<NWAY; ++j) {
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*sint[j]) + vall(al[0])*dy0[j];
y0[j] = vall(al[1])*cost[j]*y1[j] + vall(al[0])*y0[j];
}
for (int j=0; j<NWAY; ++j) {
Q re[j] += y0[j] * vall(Ql0[l]);
S to[j] += dy0[j] * vall(Sl0[l-1]);
T po[j] -= dy0[j] * vall(Tl0[l-1]);
}
}
for (int j=0; j<NWAY; ++j) {
Q vstor(rnr, j+k, re[j]+ro[j]); vstor(rsr, j+k, re[j]-ro[j]);
S vstor(tnr, j+k, te[j]+to[j]); vstor(tsr, j+k, te[j]-to[j]);
T vstor(pnr, j+k, pe[j]+po[j]); vstor(psr, j+k, pe[j]-po[j]);
}
k+=NWAY;
} while (k < nk);
k=0; do { // merge symmetric and antisymmetric parts.
Q BrF[(k/2)*k_inc] = rnr[k] + I*rnr[k+1];
Q BrF[(NLAT_2-1 - k/2)*k_inc] = rsr[k+1] + I*rsr[k];
S BtF[(k/2)*k_inc] = tnr[k] + I*tnr[k+1];
S BtF[(NLAT_2-1 - k/2)*k_inc] = tsr[k+1] + I*tsr[k];
T BpF[(k/2)*k_inc] = pnr[k] + I*pnr[k+1];
T BpF[(NLAT_2-1 - k/2)*k_inc] = psr[k+1] + I*psr[k];
k+=2;
} while(k < NLAT_2);
m0=mstep;
}
#ifndef SHT_AXISYM
for (im=m0; im<imlim; im+=mstep) {
m = im*MRES;
V m_1 = 1.0/m;
//alm = shtns->alm[im];
alm = shtns->alm + im*(2*LMAX -m+MRES);
l = m;
k = LiM(shtns, l,im);
//k = (im*(2*(LMAX+1)-(m+MRES)))>>1 + l;
do { // copy input coefficients to a local array.
Q ((v2d*)Ql)[l-1] = ((v2d*)Qlm)[k];
S ((v2d*)Sl)[l-1] = ((v2d*)Slm)[k];
T ((v2d*)Tl)[l-1] = ((v2d*)Tlm)[k];
++l; ++k;
} while(l<=llim);
k = shtns->tm[im] / VSIZE2; // stay on vector boundary
#if VSIZE2 == 1
k -= k&1; // we operate without vectors, but we still need complex alignement (2 doubles).
#endif
do {
al = alm;
rnd cost[NWAY], y0[NWAY], y1[NWAY];
V rnd st2[NWAY], dy0[NWAY], dy1[NWAY];
Q rnd rer[NWAY], rei[NWAY], ror[NWAY], roi[NWAY];
V rnd ter[NWAY], tei[NWAY], tor[NWAY], toi[NWAY];
V rnd per[NWAY], pei[NWAY], por[NWAY], poi[NWAY];
for (int j=0; j<NWAY; ++j) {
cost[j] = vread(st, k+j);
y0[j] = vall(1.0);
V st2[j] = cost[j]*cost[j]*vall(-m_1);
V y0[j] = vall(m); // for the vector transform, compute ylm*m/sint
}
Q l=m;
V l=m-1;
long int ny = 0;
if ((int)llim <= SHT_L_RESCALE_FLY) {
do { // sin(theta)^m
if (l&1) for (int j=0; j<NWAY; ++j) y0[j] *= cost[j];
for (int j=0; j<NWAY; ++j) cost[j] *= cost[j];
} while(l >>= 1);
} else {
long int nsint = 0;
do { // sin(theta)^m (use rescaling to avoid underflow)
if (l&1) {
for (int j=0; j<NWAY; ++j) y0[j] *= cost[j];
ny += nsint;
if (vlo(y0[0]) < (SHT_ACCURACY+1.0/SHT_SCALE_FACTOR)) {
ny--;
for (int j=0; j<NWAY; ++j) y0[j] *= vall(SHT_SCALE_FACTOR);
}
}
for (int j=0; j<NWAY; ++j) cost[j] *= cost[j];
nsint += nsint;
if (vlo(cost[0]) < 1.0/SHT_SCALE_FACTOR) {
nsint--;
for (int j=0; j<NWAY; ++j) cost[j] *= vall(SHT_SCALE_FACTOR);
}
} while(l >>= 1);
}
for (int j=0; j<NWAY; ++j) {
y0[j] *= vall(al[0]);
cost[j] = vread(ct, j+k);
V dy0[j] = cost[j]*y0[j];
Q ror[j] = vall(0.0); roi[j] = vall(0.0);
Q rer[j] = vall(0.0); rei[j] = vall(0.0);
}
for (int j=0; j<NWAY; ++j) {
y1[j] = (vall(al[1])*y0[j]) *cost[j]; // y1[j] = vall(al[1])*cost[j]*y0[j];
V por[j] = vall(0.0); tei[j] = vall(0.0);
V tor[j] = vall(0.0); pei[j] = vall(0.0);
V dy1[j] = (vall(al[1])*y0[j]) *(cost[j]*cost[j] + st2[j]); // dy1[j] = vall(al[1])*(cost[j]*dy0[j] - y0[j]*st2[j]);
V poi[j] = vall(0.0); ter[j] = vall(0.0);
V toi[j] = vall(0.0); per[j] = vall(0.0);
}
l=m; al+=2;
while ((ny<0) && (l<llim)) { // ylm treated as zero and ignored if ny < 0
for (int j=0; j<NWAY; ++j) {
y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j];
}
for (int j=0; j<NWAY; ++j) {
y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j];
}
l+=2; al+=4;
if (fabs(vlo(y0[NWAY-1])) > SHT_ACCURACY*SHT_SCALE_FACTOR + 1.0) { // rescale when value is significant
++ny;
for (int j=0; j<NWAY; ++j) {
y0[j] *= vall(1.0/SHT_SCALE_FACTOR); y1[j] *= vall(1.0/SHT_SCALE_FACTOR);
V dy0[j] *= vall(1.0/SHT_SCALE_FACTOR); dy1[j] *= vall(1.0/SHT_SCALE_FACTOR);
}
}
}
if (ny == 0) {
while (l<llim) { // compute even and odd parts
Q for (int j=0; j<NWAY; ++j) { rer[j] += y0[j] * qr(l); rei[j] += y0[j] * qi(l); }
Q for (int j=0; j<NWAY; ++j) { ror[j] += y1[j] * qr(l+1); roi[j] += y1[j] * qi(l+1); }
S for (int j=0; j<NWAY; ++j) { tor[j] += dy0[j] * sr(l); pei[j] += y0[j] * sr(l); }
S for (int j=0; j<NWAY; ++j) { ter[j] += dy1[j] * sr(l+1); poi[j] += y1[j] * sr(l+1); }
S for (int j=0; j<NWAY; ++j) { toi[j] += dy0[j] * si(l); per[j] -= y0[j] * si(l); }
S for (int j=0; j<NWAY; ++j) { tei[j] += dy1[j] * si(l+1); por[j] -= y1[j] * si(l+1); }
T for (int j=0; j<NWAY; ++j) { por[j] -= dy0[j] * tr(l); tei[j] += y0[j] * tr(l); }
T for (int j=0; j<NWAY; ++j) { per[j] -= dy1[j] * tr(l+1); toi[j] += y1[j] * tr(l+1); }
T for (int j=0; j<NWAY; ++j) { poi[j] -= dy0[j] * ti(l); ter[j] -= y0[j] * ti(l); }
T for (int j=0; j<NWAY; ++j) { pei[j] -= dy1[j] * ti(l+1); tor[j] -= y1[j] * ti(l+1); }
for (int j=0; j<NWAY; ++j) {
V dy0[j] = vall(al[1])*(cost[j]*dy1[j] + y1[j]*st2[j]) + vall(al[0])*dy0[j];
y0[j] = vall(al[1])*(cost[j]*y1[j]) + vall(al[0])*y0[j];
}
for (int j=0; j<NWAY; ++j) {
V dy1[j] = vall(al[3])*(cost[j]*dy0[j] + y0[j]*st2[j]) + vall(al[2])*dy1[j];
y1[j] = vall(al[3])*(cost[j]*y0[j]) + vall(al[2])*y1[j];
}
l+=2; al+=4;
}
if (l==llim) {
Q for (int j=0; j<NWAY; ++j) { rer[j] += y0[j] * qr(l); rei[j] += y0[j] * qi(l); }
S for (int j=0; j<NWAY; ++j) { tor[j] += dy0[j] * sr(l); pei[j] += y0[j] * sr(l); }
S for (int j=0; j<NWAY; ++j) { toi[j] += dy0[j] * si(l); per[j] -= y0[j] * si(l); }
T for (int j=0; j<NWAY; ++j) { por[j] -= dy0[j] * tr(l); tei[j] += y0[j] * tr(l); }
T for (int j=0; j<NWAY; ++j) { poi[j] -= dy0[j] * ti(l); ter[j] -= y0[j] * ti(l); }
}
3 for (int j=0; j<NWAY; ++j) cost[j] = vread(st, k+j) * vall(m_1);
3 for (int j=0; j<NWAY; ++j) { rer[j] *= cost[j]; ror[j] *= cost[j]; rei[j] *= cost[j]; roi[j] *= cost[j]; }
}
for (int j=0; j<NWAY; ++j) {
Q vstor(rnr, j+k, rer[j]+ror[j]); vstor(rsr, j+k, rer[j]-ror[j]);
Q vstor(rni, j+k, rei[j]+roi[j]); vstor(rsi, j+k, rei[j]-roi[j]);
V vstor(tnr, j+k, ter[j]+tor[j]); vstor(tsr, j+k, ter[j]-tor[j]);
V vstor(tni, j+k, tei[j]+toi[j]); vstor(tsi, j+k, tei[j]-toi[j]);
V vstor(pnr, j+k, per[j]+por[j]); vstor(psr, j+k, per[j]-por[j]);
V vstor(pni, j+k, pei[j]+poi[j]); vstor(psi, j+k, pei[j]-poi[j]);
}
k+=NWAY;
} while (k < nk);
l = shtns->tm[im] >> 1; // stay on a 16 byte boundary
Q k=0; while (k<l) { // polar optimization
Q BrF[im*m_inc + k*k_inc] = 0.0; BrF[(NPHI-im)*m_inc + k*k_inc] = 0.0;
Q BrF[im*m_inc + (NLAT_2-l+k)*k_inc] = 0.0; BrF[(NPHI-im)*m_inc + (NLAT_2-l+k)*k_inc] = 0.0;
Q ++k;
Q }
Q k*=2; do {
Q BrF[im*m_inc + (k/2)*k_inc] = (rnr[k]-rni[k+1]) + I*(rnr[k+1]+rni[k]);
Q BrF[(NPHI-im)*m_inc + (k/2)*k_inc] = (rnr[k]+rni[k+1]) + I*(rnr[k+1]-rni[k]);
Q BrF[im*m_inc + (NLAT_2-1-k/2)*k_inc] = (rsr[k+1]-rsi[k]) + I*(rsr[k]+rsi[k+1]);
Q BrF[(NPHI-im)*m_inc + (NLAT_2-1-k/2)*k_inc] = (rsr[k+1]+rsi[k]) + I*(rsr[k]-rsi[k+1]);
Q k+=2;
Q } while(k < NLAT_2);
V k=0; while (k<l) { // polar optimization
V BtF[im*m_inc + k*k_inc] = 0.0; BtF[(NPHI-im)*m_inc + k*k_inc] = 0.0;
V BtF[im*m_inc + (NLAT_2-l+k)*k_inc] = 0.0; BtF[(NPHI-im)*m_inc + (NLAT_2-l+k)*k_inc] = 0.0;
V ++k;
V }
V k*=2; do {
V BtF[im*m_inc + (k/2)*k_inc] = (tnr[k]-tni[k+1]) + I*(tnr[k+1]+tni[k]);
V BtF[(NPHI-im)*m_inc + (k/2)*k_inc] = (tnr[k]+tni[k+1]) + I*(tnr[k+1]-tni[k]);
V BtF[im*m_inc + (NLAT_2-1-k/2)*k_inc] = (tsr[k+1]-tsi[k]) + I*(tsr[k]+tsi[k+1]);
V BtF[(NPHI-im)*m_inc + (NLAT_2-1-k/2)*k_inc] = (tsr[k+1]+tsi[k]) + I*(tsr[k]-tsi[k+1]);
V k+=2;
V } while(k < NLAT_2);
V k=0; while (k<l) { // polar optimization
V BpF[im*m_inc + k*k_inc] = 0.0; BpF[(NPHI-im)*m_inc + k*k_inc] = 0.0;
V BpF[im*m_inc + (NLAT_2-l+k)*k_inc] = 0.0; BpF[(NPHI-im)*m_inc + (NLAT_2-l+k)*k_inc] = 0.0;
V ++k;
V }
V k*=2; do {
V BpF[im*m_inc + (k/2)*k_inc] = (pnr[k]-pni[k+1]) + I*(pnr[k+1]+pni[k]);
V BpF[(NPHI-im)*m_inc + (k/2)*k_inc] = (pnr[k]+pni[k+1]) + I*(pnr[k+1]-pni[k]);
V BpF[im*m_inc + (NLAT_2-1-k/2)*k_inc] = (psr[k+1]-psi[k]) + I*(psr[k]+psi[k+1]);
V BpF[(NPHI-im)*m_inc + (NLAT_2-1-k/2)*k_inc] = (psr[k+1]+psi[k]) + I*(psr[k]-psi[k+1]);
V k+=2;
V } while(k < NLAT_2);
}
while(im <= NPHI-imlim) { // padding for high m's
k=0;
do {
Q BrF[im*m_inc + k*k_inc] = 0.0;
V BtF[im*m_inc + k*k_inc] = 0.0;
V BpF[im*m_inc + k*k_inc] = 0.0;
} while (++k < NLAT_2);
im+=mstep;
}
#endif
}
Q #undef qr
Q #undef qi
S #undef sr
S #undef si
T #undef tr
T #undef ti
static
3 void GEN3(SHqst_to_spat_mic,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, cplx *Slm, cplx *Tlm, double *Vr, double *Vt, double *Vp, long int llim) {
QX void GEN3(SH_to_spat_mic,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Qlm, double *Vr, long int llim) {
#ifndef SHT_GRAD
VX void GEN3(SHsphtor_to_spat_mic,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, cplx *Tlm, double *Vt, double *Vp, long int llim) {
#else
S void GEN3(SHsph_to_spat_mic,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Slm, double *Vt, double *Vp, long int llim) {
T void GEN3(SHtor_to_spat_mic,NWAY,SUFFIX)(shtns_cfg shtns, cplx *Tlm, double *Vt, double *Vp, long int llim) {
#endif
int k;
unsigned imlim = 0;
Q cplx* BrF = (cplx*) Vr;
V cplx* BtF = (cplx*) Vt; cplx* BpF = (cplx*) Vp;
#ifndef SHT_AXISYM
imlim = MTR;
#ifdef SHT_VAR_LTR
if (imlim*MRES > (unsigned) llim) imlim = ((unsigned) llim)/MRES; // 32bit mul and div should be faster
#endif
if (shtns->fftc_mode > 0) { // alloc memory for the FFT
unsigned long nv = shtns->nspat;
QX BrF = (cplx*) VMALLOC( nv * sizeof(double) );
VX BtF = (cplx*) VMALLOC( 2*nv * sizeof(double) );
VX BpF = BtF + nv/2;
3 BrF = (cplx*) VMALLOC( 3*nv * sizeof(double) );
3 BtF = BrF + nv/2; BpF = BrF + nv;
}
#endif
imlim += 1;
#pragma omp parallel num_threads(shtns->nthreads)
{
3 GEN3(_sy3,NWAY,SUFFIX)(shtns, Qlm, Slm, Tlm, BrF, BtF, BpF, llim, imlim);
QX GEN3(_sy1,NWAY,SUFFIX)(shtns, Qlm, BrF, llim, imlim);
#ifndef SHT_GRAD
VX GEN3(_sy2,NWAY,SUFFIX)(shtns, Slm, Tlm, BtF, BpF, llim, imlim);
#else
S GEN3(_sy1s,NWAY,SUFFIX)(shtns, Slm, BtF, BpF, llim, imlim);
T GEN3(_sy1t,NWAY,SUFFIX)(shtns, Tlm, BtF, BpF, llim, imlim);
#endif
}
#ifndef SHT_AXISYM
// NPHI > 1 as SHT_AXISYM is not defined.
if (shtns->fftc_mode >= 0) {
if (shtns->fftc_mode == 0) {
Q fftw_execute_dft(shtns->ifftc, (cplx *) BrF, (cplx *) Vr);
V fftw_execute_dft(shtns->ifftc, (cplx *) BtF, (cplx *) Vt);
V fftw_execute_dft(shtns->ifftc, (cplx *) BpF, (cplx *) Vp);
} else { // split dft
Q fftw_execute_split_dft(shtns->ifftc,((double*)BrF)+1, ((double*)BrF), Vr+NPHI, Vr);
V fftw_execute_split_dft(shtns->ifftc,((double*)BtF)+1, ((double*)BtF), Vt+NPHI, Vt);
V fftw_execute_split_dft(shtns->ifftc,((double*)BpF)+1, ((double*)BpF), Vp+NPHI, Vp);
Q VFREE(BrF);
VX VFREE(BtF); // this frees also BpF.
}
}
#endif
}
|
5805.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Array initialization: fill A with the deterministic pattern (i + j) / nj. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++) {
    for (j = 0; j < nj; j++) {
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++) {
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* Line break every 20 values to keep the dump readable. */
      if ((i * NJ + j) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
/* NOTE(review): "#P9", "#P11" and "#P12" look like autotuner placeholders
   that are substituted before compilation (schedule kind, chunk/thread
   count, collapse depth); the file does not compile as plain C until they
   are replaced. The bare "#pragma omp" lines below likewise appear to be
   substitution targets — confirm against the tuning harness. */
#pragma omp parallel for private(i, j) collapse(#P12) schedule(#P9, #P11) num_threads(#P11)
#pragma omp
for (i = 1; i < _PB_NI - 1; ++i)
{
#pragma omp
for (j = 1; j < _PB_NJ - 1; ++j)
{
/* 3x3 convolution stencil with fixed coefficients, interior points only. */
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
/* Driver: allocates the arrays, initializes A, times the kernel and dumps B
   to stderr so dead-code elimination cannot remove the computation. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
dynamic_module.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-aarch64-unknown-linux-gnu %t.so && %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-powerpc64-ibm-linux-gnu %t.so && %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-powerpc64le-ibm-linux-gnu %t.so && %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-x86_64-pc-linux-gnu %t.so && %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-nvptx64-nvidia-cuda %t.so && %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | %fcheck-nvptx64-nvidia-cuda
#ifdef SHARED
/* Compiled with -DSHARED this TU becomes the auxiliary shared library the
   RUN lines link against; it only needs to provide a symbol. */
void foo() {}
#else
#include <stdio.h>
/* Host program: runs an empty target region to exercise offloading with a
   dynamic module linked in, then prints the token FileCheck looks for. */
int main() {
#pragma omp target
;
// CHECK: DONE.
printf("%s\n", "DONE.");
return 0;
}
#endif
|
omp_fill_taskqueue.c | // RUN: %libomp-compile && env KMP_ENABLE_TASK_THROTTLING=0 %libomp-run
// RUN: %libomp-compile && env KMP_ENABLE_TASK_THROTTLING=1 %libomp-run
// REQUIRES: !abt && !icc
#include<omp.h>
#include<stdlib.h>
#include<string.h>
/**
* Test the task throttling behavior of the runtime.
* Unless OMP_NUM_THREADS is 1, the master thread pushes tasks to its own tasks
* queue until either of the following happens:
* - the task queue is full, and it starts serializing tasks
* - all tasks have been pushed, and it can begin execution
 * The idea is to create a huge number of tasks whose execution is blocked
 * until the master thread comes to execute tasks (they need to be blocking,
 * otherwise the second thread would start emptying the queue).
* At this point we can check the number of enqueued tasks: iff all tasks have
* been enqueued, then there was no task throttling.
* Otherwise there has been some sort of task throttling.
* If what we detect doesn't match the value of the environment variable, the
* test is failed.
*/
#define NUM_TASKS 2000
/*
 * Detect whether the runtime throttled task creation and compare the result
 * against the KMP_ENABLE_TASK_THROTTLING environment variable.
 * Returns 0 (success) when observation matches expectation, 1 otherwise.
 */
int main()
{
  int i;
  // 'block' is spun on by worker tasks and cleared by the master thread;
  // volatile keeps the busy-wait below from being optimized into an
  // infinite loop (the original plain int relied on the compiler not
  // hoisting the load).
  volatile int block = 1;
  // getenv() may return NULL when the variable is unset; calling
  // strcmp(NULL, ...) is undefined behavior, so guard it and default to
  // "no throttling expected".
  const char *env = getenv("KMP_ENABLE_TASK_THROTTLING");
  int throttling = env != NULL && strcmp(env, "1") == 0;
  int enqueued = 0;
  int failed = -1;
#pragma omp parallel num_threads(2)
#pragma omp master
  {
    for (i = 0; i < NUM_TASKS; i++) {
      enqueued++;
#pragma omp task
      {
        int tid;
        tid = omp_get_thread_num();
        if (tid == 0) {
          // As soon as the master thread starts executing tasks we should
          // unlock all tasks, and detect the test failure if it has not been
          // done yet.
          if (failed < 0)
            failed = throttling ? enqueued == NUM_TASKS : enqueued < NUM_TASKS;
          block = 0;
        }
        while (block)
          ;
      }
    }
    block = 0;
  }
  return failed;
}
|
AutoRelease.c | /**
* C Object System
* COS Autorelease pool
*
* Copyright 2006+ Laurent Deniau <laurent.deniau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cos/Object.h>
#include <cos/gen/object.h>
#include <cos/gen/value.h>
#include <stdlib.h>
#include <string.h>
/* NOTE-CONF: AutoRelease storage size
* Init specifies the number of initial slots allocated for
* autoreleased objects. Rate specifies the rate at which this storage
* must grow (e.g. 2.0 means double the size each time it is
 * full). Warn specifies the threshold for warning about the number of
* objects autoreleased in a single pool (during expansion only).
*/
#define COS_AUTORELEASE_INIT 500
#define COS_AUTORELEASE_RATE 1618 // golden ratio * 1000
#define COS_AUTORELEASE_WARN 10000000 // 0 = *never*
// private class: a growable LIFO stack of autoreleased objects, one pool per
// thread, chained through 'prv' down to the per-thread sentinel.
defclass(AutoRelease)
  struct AutoRelease *prv; // enclosing (older) pool in this thread's chain
  OBJ *stk;                // base of the object stack (may point at _stk)
  OBJ *top;                // one past the last pushed object
  OBJ *end;                // one past the last available slot
  OBJ tmp;                 // object kept reachable while enlarge() may THROW
  OBJ _stk[16];            // small embedded storage used before first growth
endclass

makclass(AutoRelease);

// -----

useclass(ExBadValue, ExBadAlloc, ExBadMessage);

// -----

// Compile-time sanity checks on the NOTE-CONF tuning constants above.
STATIC_ASSERT(COS_AUTORELEASE_RATE_must_be_greater_than_3_div_2,
              COS_AUTORELEASE_RATE >= 1500);
STATIC_ASSERT(COS_AUTORELEASE_INIT_must_be_greater_than_100,
              COS_AUTORELEASE_INIT >= 100);
STATIC_ASSERT(COS_AUTORELEASE_WARN_is_too_small,
              COS_AUTORELEASE_WARN >= 10000);

/* NOTE-INFO: AutoRelease and threads
 * This code assumes the creation of a new pool for each new thread
 */
static struct AutoRelease _pool0; // sentinel: bottom of every thread's pool chain

// Per-thread "current pool" storage. Two strategies: native TLS (__thread)
// when available or when building under OpenMP, otherwise a pthread key whose
// destructor tears down the thread's whole pool chain on thread exit.
#if defined(_OPENMP) || COS_HAS_TLS || !COS_HAS_POSIX // --------------------

static __thread struct AutoRelease *_pool = &_pool0;

#ifdef _OPENMP
#pragma omp threadprivate(_pool)
#endif

// Return the current thread's innermost pool.
static inline struct AutoRelease*
pool_get(void)
{
  return _pool;
}

// Make 'pool' the current thread's innermost pool.
static inline void
pool_set(struct AutoRelease *pool)
{
  _pool = pool;
}

// Nothing to set up for the TLS variant.
static void
_pool_init(void)
{
}

#else // !defined(_OPENMP) && !COS_HAS_TLS && COS_HAS_POSIX -----------------

static pthread_key_t _pool_key;

// Make 'pool' the current thread's innermost pool.
static void
pool_set(struct AutoRelease *pool)
{
  ensure( pthread_setspecific(_pool_key, pool) == 0 );
}

// Return the current thread's innermost pool, lazily binding the sentinel the
// first time this thread asks.
static cos_inline struct AutoRelease*
pool_get(void)
{
  struct AutoRelease *pool = pthread_getspecific(_pool_key);
  if (pool) return pool;
  pool_set(&_pool0);
  return &_pool0;
}

// Thread-exit destructor: walk down to the oldest real pool (the one chained
// directly onto the sentinel) and release it, which drains the whole chain.
static void
_pool_deinit(void *pool_)
{
  struct AutoRelease *pool = pool_;
  while (pool->prv != &_pool0)
    pool = pool->prv;
  grelease((OBJ)pool);
}

static void
_pool_init(void)
{
  ensure( pthread_key_create(&_pool_key, _pool_deinit) == 0 );
}

#endif // ------------------------------------------------
// Grow pool p's object stack. The first growth moves the contents of the
// embedded _stk buffer onto the heap (COS_AUTORELEASE_INIT slots); later
// growths expand geometrically by COS_AUTORELEASE_RATE/1000 (golden ratio).
// Throws ExBadAlloc when the allocation fails.
static void
enlarge(struct AutoRelease* p)
{
  U32  used = p->top - p->stk;
  U32  grown;
  OBJ *buf;

  if (p->stk != p->_stk) {
    // already on the heap: geometric growth
    grown = used * (COS_AUTORELEASE_RATE/1000.0);
    buf = realloc(p->stk, sizeof *buf * grown);
    if (used >= COS_AUTORELEASE_WARN)
      cos_debug("pool at %p hold %u autoreleased objects", (void*)p, used);
  } else {
    // first expansion: leave the embedded storage behind
    grown = COS_AUTORELEASE_INIT;
    buf = malloc(sizeof *buf * grown);
    if (buf) memcpy(buf, p->stk, sizeof *buf * used);
  }

  if (!buf) THROW(ExBadAlloc);

  p->stk = buf;
  p->top = buf + used;
  p->end = buf + grown;
}
// Release everything held by pool p: first the in-flight 'tmp' object (set
// while enlarge() may THROW), then every pushed object in LIFO order.
// Note the post-decrement: the loop exits with p->top == p->stk - 1; callers
// (gdeinit) discard or reset the stack afterwards, so the off-by-one cursor
// is never reused.
static cos_inline void
clear(struct AutoRelease *p)
{
  if (p->tmp)
    grelease(p->tmp), p->tmp = 0;

  while (p->top-- > p->stk)
    grelease(*p->top);
}
// Append obj to the current thread's pool and return it unchanged.
// While the stack is being enlarged (which may THROW), obj is parked in
// pool->tmp so that clear() can still release it on unwinding.
static cos_inline OBJ
push(OBJ obj)
{
  struct AutoRelease *pool = pool_get();

  if (pool->top == pool->end) {
    pool->tmp = obj;
    enlarge(pool);
    pool->tmp = 0;
  }

  *pool->top++ = obj;
  return obj;
}
// ----- Any ownership

// gretain: take a strong reference on _1.
// - counted objects (rc >= UNIT): bump the reference count
// - AUTO (stack-allocated) objects: return an owned copy, since the stack
//   storage cannot outlive its frame
// - STATIC objects: returned as-is, never counted
// - anything below STATIC is a corrupted count -> ExBadValue
defmethod(OBJ, gretain, Any)
  if (cos_object_rc(_1) >= COS_RC_UNIT)
    retmethod( cos_object_incRc(_1) );
  if (cos_object_rc(_1) == COS_RC_AUTO)
    retmethod( gcopy(_1) );
  if (cos_object_rc(_1) == COS_RC_STATIC)
    retmethod(_1);
  // cos_object_rc(_1) < COS_RC_STATIC
  THROW( gnewWithStr(ExBadValue, "invalid reference counting") );
endmethod

// gautoRelease: hand ownership to the current pool. Same rc cases as
// gretain, but the (possibly copied) object is pushed onto the pool.
defmethod(OBJ, gautoRelease, Any)
  if (cos_object_rc(_1) >= COS_RC_UNIT)
    retmethod( push(_1) );
  if (cos_object_rc(_1) == COS_RC_AUTO)
    retmethod( push(gcopy(_1)) );
  if (cos_object_rc(_1) == COS_RC_STATIC)
    retmethod(_1);
  // cos_object_rc(_1) < COS_RC_STATIC
  THROW( gnewWithStr(ExBadValue, "invalid reference counting") );
endmethod

// grelease: drop a reference.
// - rc > UNIT: plain decrement
// - rc == UNIT: last reference; the count is pinned to STATIC before
//   deinit/dealloc so that cyclic back-references fall into the no-op
//   STATIC case instead of re-entering destruction
// - STATIC and AUTO: no-op
// - below STATIC: corrupted count -> ExBadValue
defmethod(void, grelease, Any)
  if (cos_object_rc(_1) > COS_RC_UNIT)
    cos_object_decRc(_1);
  else
  if (cos_object_rc(_1) == COS_RC_UNIT) // take care of cyclic dependencies
    gdealloc(gdeinit(cos_object_setRc(_1, COS_RC_STATIC)));
  else
  if (cos_object_rc(_1) < COS_RC_STATIC) // insensitive to STATIC and AUTO
    THROW( gnewWithStr(ExBadValue, "invalid reference counting") );
endmethod
// ----- Class ownership (always static)

// Classes are immortal: retain/autoRelease return the class unchanged and
// release is a no-op.
defmethod(OBJ, gretain, Class)
  retmethod(_1);
endmethod

defmethod(OBJ, gautoRelease, Class)
  retmethod(_1);
endmethod

defmethod(void, grelease, Class)
endmethod
// ----- AutoRelease ownership

// Pools cannot be retained: their lifetime follows the stack discipline only.
defmethod(OBJ, gretain, AutoRelease)
  THROW( gnewWithStr(ExBadMessage, "AutoRelease pool cannot be retained") );
  COS_UNUSED(_ret);
endmethod

// Autoreleasing a pool is a no-op: it is already chained to its parent pool.
defmethod(OBJ, gautoRelease, AutoRelease)
  COS_UNUSED(_ret); // insensitive, already chained
endmethod

// Releasing a pool destroys it outright.
defmethod(void, grelease, AutoRelease)
  cos_trace("destroying pool at %p [%u objects]", (void*)_1, gsize(_1));
  gdealloc(gdeinit(_1)); // cannot be auto, static or retained
endmethod
// -----

// Number of objects currently held by this pool.
defmethod(U32, gsize, AutoRelease)
  retmethod(self->top - self->stk);
endmethod
// -----

// Initialize a pool: point the stack at the embedded storage, chain onto the
// thread's current pool and become the new innermost pool.
defmethod(OBJ, ginit, AutoRelease)
  cos_object_setRc(_1, COS_RC_AUTO); // AutoRelease pools are "linked" to the stack
  self->stk = self->_stk;
  self->top = self->_stk;
  self->end = self->_stk + COS_ARRLEN(self->_stk);
  self->prv = pool_get();
  self->tmp = 0;
  pool_set(self);
  retmethod(_1);
endmethod
// Tear a pool down: drain any pools nested above it, release its own
// objects, free the heap stack if one was allocated, and pop it from the
// thread's chain.
defmethod(OBJ, gdeinit, AutoRelease)
  struct AutoRelease *pool;

  // safer to release pool(s) above self (created later, still chained)
  while ((pool = pool_get()) != self)
    grelease((OBJ)pool);

  // release autoReleased objects (LIFO order, see clear())
  clear(self);

  // free stack, if we ever grew off the embedded storage
  if (self->stk != self->_stk)
    free(self->stk), self->stk = 0;

  // remove from top: the enclosing pool becomes current again
  pool_set(self->prv);

  retmethod(_1);
endmethod
// -----

// One-time module init: turn the static sentinel into a valid AutoRelease
// object, point its prv at itself (which marks the module "initialized"),
// set up the TLS/key machinery, then push the first real pool state.
defmethod(void, ginitialize, pmAutoRelease)
  if (!_pool0.prv) {
    OBJ pool = (OBJ)(void*)&_pool0;
    // cos_trace("ginitialize(pmAutoRelease)");
    cos_object_setId(pool, cos_class_id(classref(AutoRelease)));
    cos_object_setRc(pool, COS_RC_STATIC);
    _pool0.prv = &_pool0;
    _pool_init();
    ginit((OBJ)(void*)&_pool0);
  }
endmethod

// Module teardown: deinit the sentinel and mark the module uninitialized.
defmethod(void, gdeinitialize, pmAutoRelease)
  if (_pool0.prv) {
    OBJ pool = (OBJ)(void*)&_pool0;
    // cos_trace("gdeinitialize(pmAutoRelease)");
    gdeinit(pool);
    _pool0.prv = 0;
  }
endmethod
/*
* ----------------------------------------------------------------------------
* Debug Functions
* ----------------------------------------------------------------------------
*/
#include <cos/debug.h>
// Debug helper: dump the current pool's autoreleased objects, most recently
// pushed first, to fp (stderr when fp is NULL).
void
cos_autorelease_showStack(FILE *fp)
{
  struct AutoRelease *pool = pool_get();
  OBJ *slot = pool->top;
  U32  idx  = 0;

  if (!fp)
    fp = stderr;

  while (slot-- > pool->stk) {
    fprintf(fp, "AutoRelease[%4u] = %-25s (%4u refs)\n",
            idx, gclassName(*slot), gretainCount(*slot));
    idx++;
  }
}
|
explicit_residualbased_builder.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: kkazem $
* Date: $Date: 2008-11-19 16:12:53 $
* Revision: $Revision: 1.10 $
*
* ***********************************************************/
#if !defined(KRATOS_EXPLICIT_RESIDUAL_BASED_BUILDER )
#define KRATOS_EXPLICIT_RESIDUAL_BASED_BUILDER
/* System includes */
#include <set>
// #include <omp.h>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#else
#include <ctime>
#endif
/* External includes */
#include "boost/smart_ptr.hpp"
#include "utilities/timer.h"
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/model_part.h"
#include "containers/array_1d.h"
#include "includes/variables.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
Current class provides an implementation for standard builder and solving operations.
the RHS is constituted by the unbalanced loads (residual)
Degrees of freedom are reordered putting the restrained degrees of freedom at
the end of the system ordered in reverse order with respect to the DofSet.
Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
this information.
Calculation of the reactions involves a cost very similar to the calculation of the total residual
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace , //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ExplicitResidualBasedBuilder
: public ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ExplicitResidualBasedBuilder );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
 * Forwards the linear solver to the elimination builder-and-solver base
 * class; this explicit builder never actually solves a linear system.
 */
ExplicitResidualBasedBuilder(
    typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver)
{
    std::cout << "using the ExplicitResidualBasedBuilder builder and solver " << std::endl;
}

/** Destructor.
 */
virtual ~ExplicitResidualBasedBuilder() {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
/**
 * Assemble the right-hand side (residual) by accumulating each element's
 * local RHS directly into nodal solution-step variables (RHS, RHS_WATER,
 * RHS_AIR). The algebraic vector 'b' is not filled here: in this explicit
 * builder the residual lives on the nodes.
 *
 * Elements are processed in an OpenMP parallel loop with per-node
 * SetLock/UnSetLock guarding concurrent accumulation; conditions are
 * processed serially afterwards.
 */
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemVectorType& b)
{
    KRATOS_TRY

    ModelPart::ElementsContainerType::iterator elem_bg = r_model_part.ElementsBegin();
    int n_elems = r_model_part.Elements().size();
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

    // assemble all elements
    #pragma omp parallel for firstprivate(n_elems, elem_bg)
    for( int ii=0; ii<n_elems; ++ii)
    {
        ModelPart::ElementsContainerType::iterator it = elem_bg + ii;
        Element::GeometryType& geom = it->GetGeometry();
        // IS_WATER_ELEMENT flags the phase: 1.0 -> water, 0.0 -> air
        double air_water = it->GetValue(IS_WATER_ELEMENT);
        unsigned int nodes_num = geom.size();
        unsigned int dim = it->GetGeometry().WorkingSpaceDimension();

        // calculate elemental Right Hand Side Contribution
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        Element::EquationIdVectorType EquationId;
        pScheme->Calculate_RHS_Contribution(*(it.base()),RHS_Contribution,EquationId,CurrentProcessInfo);

        // add RHS_Elemental to its nodes.
        // type_ind = dofs per node: dim velocity components plus, when the
        // local RHS is big enough to contain them, one pressure component.
        unsigned int type_ind = dim+1;
        unsigned int rhs_size = RHS_Contribution.size();
        unsigned int air_water_size = type_ind*nodes_num;
        if(rhs_size != air_water_size)
            type_ind = dim;
        for (unsigned int i = 0; i <geom.size(); i++)
        {
            unsigned int index = i*type_ind;
            geom[i].SetLock(); // nodes are shared between threads
            array_1d<double,3>& node_rhs_vel = geom[i].FastGetSolutionStepValue(RHS);
            double& node_rhs_water_p = geom[i].FastGetSolutionStepValue(RHS_WATER);
            double& node_rhs_air_p = geom[i].FastGetSolutionStepValue(RHS_AIR);
            // add velocity rhs
            for(unsigned int kk=0; kk<dim; kk++)
                node_rhs_vel[kk] += RHS_Contribution[index+kk];
            // add pressure rhs to the phase-matching nodal variable
            if( nodes_num == (dim+1) )
            {
                if( air_water== 1.0)
                    node_rhs_water_p += RHS_Contribution[index+dim];
                else if( air_water== 0.0)
                    node_rhs_air_p += RHS_Contribution[index+dim];
                // else: element is neither air nor water -> contribution dropped
            }
            geom[i].UnSetLock();
        }

        // loop for the rest of shell nodes: elements with nodes_num == dim
        // carry extra RHS entries for their neighbour nodes
        if(nodes_num == dim)
        {
            GlobalPointersVector< Node < 3 > >& neighb = it->GetValue(NEIGHBOUR_NODES);
            unsigned int ngh_num=0;
            for (unsigned int ind = 0; ind < 3; ind++)
            {
                if (neighb[ind].Id() != geom[ind].Id())
                {
                    unsigned int ngh_index = (3 + ngh_num)*3 ;
                    neighb[ind].SetLock();
                    array_1d<double,3>& ngh_rhs_vel = neighb[ind].FastGetSolutionStepValue(RHS); // TODO: define template dim
                    for(unsigned int kk=0; kk<dim; kk++)
                        ngh_rhs_vel[kk] += RHS_Contribution[ngh_index+kk];
                    neighb[ind].UnSetLock();
                    ngh_num++;
                }
            }
        }
    }
    KRATOS_WATCH("INSIDE EXPLICIT RESIDUALBASED BUILDER FILLING AFTER ADDING ELEMENT");

    // NOTE(review): two older, commented-out condition-assembly loops and the
    // OpenMP timing scaffolding that used to live here were superseded by the
    // serial condition loop below and have been removed.

    // conditions are calculated serial
    ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    Element::EquationIdVectorType EquationId;
    for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
    {
        // calculate elemental contribution
        pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
        if(RHS_Contribution.size() != 0)
        {
            Condition::GeometryType& geom = (*it)->GetGeometry();
            //unsigned int nodes_num = geom.size();
            unsigned int dim = (*it)->GetGeometry().WorkingSpaceDimension();
            for (unsigned int i = 0; i <geom.size(); i++)
            {
                unsigned int index = i*dim;
                array_1d<double,3>& node_rhs_vel = geom[i].FastGetSolutionStepValue(RHS); // TODO: define template dim
                // add velocity rhs
                for(unsigned int kk=0; kk<dim; kk++)
                    node_rhs_vel[kk] += RHS_Contribution[index+kk];
            }
        }
        // assemble the elemental contribution
        // AssembleRHS(b,RHS_Contribution,EquationId);
    }
    KRATOS_WATCH("END OF EXPLICIT RESIDUALBASED BUILDER ");
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/** No per-step setup is needed for this explicit builder; intentionally
 * empty (the TRY/CATCH pair is kept for the Kratos error-reporting frame).
 */
void InitializeSolutionStep(
    ModelPart& r_model_part,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b)
{
    KRATOS_TRY
    KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/** No per-step teardown is needed for this explicit builder; intentionally
 * empty.
 */
void FinalizeSolutionStep(
    ModelPart& r_model_part,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b)
{
}
//**************************************************************************
//**************************************************************************
/** Intentionally empty: reactions would require refreshing the RHS, which is
 * not implemented in this explicit builder.
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b)
{
    // refresh RHS to have the correct reactions
}
//**************************************************************************
//**************************************************************************
/** Intentionally empty: in the explicit residual-based scheme the Dirichlet
 * information is already contained in the residual, so nothing is imposed on
 * the (placeholder) system here.
 */
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b)
{}
//**************************************************************************
//**************************************************************************
/** Intentionally empty: point loads are not applied by this builder. */
void ApplyPointLoads(
    typename TSchemeType::Pointer pScheme,
    ModelPart& r_model_part,
    TSystemVectorType& b)
{}
/**
 * This function is intended to be called at the end of the solution step to
 * clean up memory storage not needed.
 * Currently only logs; the actual cleanup of the DofSet and reactions vector
 * remains disabled (see the commented lines).
 */
void Clear()
{
    // this->mDofSet = DofsArrayType();
    // if(this->mpReactionsVector != NULL)
    //     TSparseSpace::Clear( (this->mpReactionsVector) );
    // this->mReactionsVector = TSystemVectorType();
    if (this->GetEchoLevel()>0)
    {
        KRATOS_WATCH("ExplicitResidualBasedBuilder Clear Function called");
    }
}
//**************************************************************************
//**************************************************************************
/**
 * Ensure the system containers exist and give them token sizes (1x1 matrix,
 * length-1 vectors). The explicit builder never assembles a global system,
 * so A, Dx and b are only placeholders required by the strategy interface.
 *
 * NOTE(review): the original declaration listed the pScheme parameter TWICE
 * (a copy-paste duplicate), which is ill-formed C++ (redefinition of a
 * parameter name); the duplicate has been removed, restoring the intended
 * signature.
 */
void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme,
                                 TSystemMatrixPointerType& pA,
                                 TSystemVectorPointerType& pDx,
                                 TSystemVectorPointerType& pb,
                                 ModelPart& r_model_part
                               )
{
    KRATOS_TRY
    KRATOS_WATCH("Explicit ResizeAndInitializeVectors");
    if(pA == NULL) // if the pointer is not initialized, initialize it to an empty matrix
    {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) );
        pA.swap(pNewA);
    }
    if(pDx == NULL) // if the pointer is not initialized, initialize it to an empty vector
    {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) );
        pDx.swap(pNewDx);
    }
    if(pb == NULL) // if the pointer is not initialized, initialize it to an empty vector
    {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) );
        pb.swap(pNewb);
    }
    if(BaseType::mpReactionsVector == NULL) // if the pointer is not initialized, initialize it to an empty vector
    {
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) );
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }
    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;
    // resize the system containers to token sizes only
    A.resize(1,1,false);
    Dx.resize(1,false);
    b.resize(1,false);
    KRATOS_CATCH("")
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
//******************************************************************************************
//******************************************************************************************
/**
 * Split [0, number_of_rows) into number_of_threads contiguous chunks.
 * partitions[i] is the first row of chunk i and partitions[number_of_threads]
 * is number_of_rows, so chunk i spans [partitions[i], partitions[i+1]).
 * Any remainder from the integer division lands in the last chunk.
 *
 * NOTE(review): the loop index is now unsigned to avoid the signed/unsigned
 * comparison with number_of_threads that the original 'int i' produced.
 * Assumes number_of_threads > 0 (division by it) -- TODO confirm callers.
 */
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads+1);
    unsigned int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for(unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i-1] + partition_size ;
}
/*@} */
/**@name Member Variables */
/*@{ */
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ResidualBasedEliminationBuilderAndSolver */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
GB_binop__minus_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_fc32
// A.*B function (eWiseMult): GB_AemultB__minus_fc32
// A*D function (colscale): GB_AxD__minus_fc32
// D*A function (rowscale): GB_DxB__minus_fc32
// C+=B function (dense accum): GB_Cdense_accumB__minus_fc32
// C+=b function (dense accum): GB_Cdense_accumb__minus_fc32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_fc32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_fc32
// C=scalar+B GB_bind1st__minus_fc32
// C=scalar+B' GB_bind1st_tran__minus_fc32
// C=A+scalar GB_bind2nd__minus_fc32
// C=A'+scalar GB_bind2nd_tran__minus_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_minus (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_FC32_minus (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_FC32 || GxB_NO_MINUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense. Auto-generated kernel: the
// whole body comes from the shared template, which expands the GB_* macros
// defined above for this operator/type pairing (FC32 minus).
// NOTE(review): unlike the sibling kernels this one returns void and has no
// GB_DISABLE guard -- presumably by design of the generator; confirm there.
void GB_Cdense_ewise3_accum__minus_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense. Returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__minus_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// precomputed ek-slice task decomposition (kfirst/klast/pstart slices).
GrB_Info GB_Cdense_accumB__minus_fc32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. p_bwork is an untyped
// pointer to the scalar, cast to GxB_FC32_t before the template is expanded.
// NOTE(review): the return inside the braced scope makes the trailing
// return unreachable -- an artifact of the code generator, left as-is.
GrB_Info GB_Cdense_accumb__minus_fc32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D, writing into C->x.
GrB_Info GB_AxD__minus_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D, writing into C->x.
GrB_Info GB_DxB__minus_fc32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B. The slice pointers are allocated inside
// the template and released by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__minus_fc32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the MINUS fc32 operator applied on
// the intersection of the patterns of A and B.  Uses the same slice-workspace
// and GB_FREE_ALL conventions as GB_AaddB above.
GrB_Info GB_AemultB__minus_fc32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace pointers filled in (and freed via GB_FREE_ALL) by the template;
// the template references these exact names, so they must not be renamed
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p] for every entry p present in B (all p when the bitmap
// Bb is NULL, per GBB).  Cx and Bx may alias, which is safe here because
// each iteration reads and writes only position p.
GrB_Info GB_bind1st__minus_fc32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cz = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Bz = (GxB_FC32_t *) Bx_input ;
GxB_FC32_t xscalar = (*((GxB_FC32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions not present in B's bitmap
if (GBB (Bb, k))
{
GxB_FC32_t bval = Bz [k] ;
Cz [k] = GB_FC32_minus (xscalar, bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y for every entry p present in A (all p when the bitmap
// Ab is NULL, per GBB).  Cx and Ax may alias, which is safe here because
// each iteration reads and writes only position p.
GrB_Info GB_bind2nd__minus_fc32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cz = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Az = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t yscalar = (*((GxB_FC32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions not present in A's bitmap
if (GBB (Ab, k))
{
GxB_FC32_t aval = Az [k] ;
Cz [k] = GB_FC32_minus (aval, yscalar) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_minus (x, aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x - aij via the
// GB_CAST_OP macro defined just above.  The transpose template is shared
// with unary ops, hence the GB_ATYPE redefinition below.
GrB_Info GB_bind1st_tran__minus_fc32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code that follows this function
// (the preprocessor directives run regardless of the return above)
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_FC32_minus (aij, y) ; \
}
// C = op (A', y): transpose A while applying cij = aij - y via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind2nd_tran__minus_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template reads the scalar y through the name "y" inside GB_CAST_OP
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB066-pointernoaliasing-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Freshly allocated pointers do not alias to each other.
*/
#include <stdio.h>
#include <stdlib.h>
// Allocate two length-N double arrays, initialize them in an OpenMP parallel
// loop, print every element, and free the arrays.  The two freshly malloc'd
// pointers cannot alias each other, so the parallel loop is race-free.
//
// Fixes: the original dereferenced the malloc results without checking them,
// which is undefined behavior on allocation failure (CERT MEM32-C).
void setup(int N)
{
double *m_pdv_sum = malloc (sizeof *m_pdv_sum * N);
double *m_nvol = malloc (sizeof *m_nvol * N);
// bail out cleanly if either allocation failed; free(NULL) is a no-op
if (m_pdv_sum == NULL || m_nvol == NULL)
{
free(m_pdv_sum);
free(m_nvol);
return;
}
#pragma omp parallel for
for (int i = 0; i < N; ++i)
{
m_pdv_sum[ i ] = 0.0;
m_nvol[ i ] = i*2.5;
}
for (int i = 0; i < N; ++i)
{
printf("%lf\n", m_pdv_sum[ i ]);
printf("%lf\n", m_nvol[ i ]);
}
free(m_pdv_sum);
free(m_nvol);
}
// Driver: run setup() once with a fixed problem size.
int main()
{
int N = 1000;
setup(N);
// explicit success status (C99 allows omitting it in main, but being
// explicit is clearer and matches the file's C-before-C99 style)
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.